[llvm] [WIP][X86] lowerBuildVectorAsBroadcast - don't convert constant vectors to broadcasts on AVX512VL targets (PR #73509)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 8 03:56:55 PST 2023


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/73509

From adc89f0d37864fc663722bbf6b02d3c67780a7d4 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Tue, 21 Nov 2023 12:56:47 +0000
Subject: [PATCH] [X86] Disable lowering constant build vectors to broadcasts
 on AVX512 targets

On AVX512 targets we're better off keeping constant vectors at full width to ensure they can be load-folded into vector instructions, reducing register pressure.

If a vector constant remains as a basic load, X86FixupVectorConstantsPass will still convert it to a broadcast instruction for us.

Non-VLX targets still see some regressions because these ops are implicitly widened to 512-bit in isel patterns rather than in the DAG, so I've limited this to 512-bit vectors on those targets for now.
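
To illustrate the intended effect (this mirrors the fabsv8f16 change in avx512fp16-arith.ll below; the IR is a simplified sketch rather than a new test), a splat constant that previously had to be materialized into a register can now stay in memory and be broadcast-folded into the consuming instruction:

  define <8 x half> @fabs_example(<8 x half> %x) {
    %a = call <8 x half> @llvm.fabs.v8f16(<8 x half> %x)
    ret <8 x half> %a
  }
  declare <8 x half> @llvm.fabs.v8f16(<8 x half>)

  ; before: the abs mask is splatted into a register first
  ;   vpbroadcastw {{.*}}(%rip), %xmm1
  ;   vpand %xmm1, %xmm0, %xmm0
  ; after: the constant stays in memory and is broadcast-folded
  ;   vandps {{.*}}(%rip){1to4}, %xmm0, %xmm0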
---
 .../Target/X86/X86FixupVectorConstants.cpp    |   121 +-
 llvm/lib/Target/X86/X86ISelLowering.cpp       |     8 +
 llvm/lib/Target/X86/X86InstrFoldTables.cpp    |     2 +
 .../any_extend_vector_inreg_of_broadcast.ll   |    24 +-
 ...d_vector_inreg_of_broadcast_from_memory.ll |    26 +-
 .../X86/avx512-shuffles/partial_permute.ll    |    14 +-
 llvm/test/CodeGen/X86/avx512fp16-arith.ll     |    67 +-
 .../X86/avx512fp16-combine-xor-vfmulc.ll      |     3 +-
 llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll  |    22 +-
 llvm/test/CodeGen/X86/bitcast-vector-bool.ll  |    80 +-
 .../X86/broadcast-elm-cross-splat-vec.ll      |     7 -
 llvm/test/CodeGen/X86/combine-bitselect.ll    |     2 +-
 llvm/test/CodeGen/X86/combine-pmuldq.ll       |     6 +-
 llvm/test/CodeGen/X86/combine-sdiv.ll         |     4 +-
 .../CodeGen/X86/expand-vp-fp-intrinsics.ll    |     4 +-
 llvm/test/CodeGen/X86/fp-round.ll             |     7 +-
 llvm/test/CodeGen/X86/icmp-abs-C-vec.ll       |    36 +-
 .../test/CodeGen/X86/masked_gather_scatter.ll |    10 +-
 llvm/test/CodeGen/X86/movmsk-cmp.ll           |   358 +-
 llvm/test/CodeGen/X86/shuffle-half.ll         |     3 +-
 llvm/test/CodeGen/X86/sse2.ll                 |    31 +-
 .../CodeGen/X86/vec-strict-inttofp-128.ll     |     4 +-
 llvm/test/CodeGen/X86/vec_fabs.ll             |   127 +-
 llvm/test/CodeGen/X86/vec_fcopysign.ll        |   186 +-
 llvm/test/CodeGen/X86/vec_fneg.ll             |   126 +-
 llvm/test/CodeGen/X86/vector-fshl-256.ll      |     4 +-
 llvm/test/CodeGen/X86/vector-fshl-512.ll      |     4 +-
 llvm/test/CodeGen/X86/vector-fshl-rot-256.ll  |     4 +-
 llvm/test/CodeGen/X86/vector-fshl-rot-512.ll  |     4 +-
 llvm/test/CodeGen/X86/vector-fshr-256.ll      |     4 +-
 llvm/test/CodeGen/X86/vector-fshr-512.ll      |     4 +-
 llvm/test/CodeGen/X86/vector-fshr-rot-256.ll  |     4 +-
 llvm/test/CodeGen/X86/vector-fshr-rot-512.ll  |     4 +-
 .../vector-interleaved-load-i16-stride-4.ll   |   130 +-
 .../vector-interleaved-load-i16-stride-5.ll   |    14 +-
 .../vector-interleaved-load-i16-stride-7.ll   |  1711 +-
 .../vector-interleaved-load-i16-stride-8.ll   |   724 +-
 .../vector-interleaved-load-i32-stride-4.ll   |   329 +-
 .../vector-interleaved-load-i32-stride-6.ll   |   328 +-
 .../vector-interleaved-load-i32-stride-7.ll   |  2096 +-
 .../vector-interleaved-load-i32-stride-8.ll   |  3717 ++-
 .../vector-interleaved-load-i64-stride-4.ll   |   877 +-
 .../vector-interleaved-load-i64-stride-5.ll   |   100 +-
 .../vector-interleaved-load-i64-stride-6.ll   |  4158 ++--
 .../vector-interleaved-load-i64-stride-7.ll   |  4949 ++--
 .../vector-interleaved-load-i64-stride-8.ll   |  6312 ++---
 .../vector-interleaved-load-i8-stride-3.ll    |    18 +-
 .../vector-interleaved-load-i8-stride-4.ll    |     6 +-
 .../vector-interleaved-store-i16-stride-6.ll  |   521 +-
 .../vector-interleaved-store-i16-stride-7.ll  |    20 +-
 .../vector-interleaved-store-i32-stride-5.ll  |   474 +-
 .../vector-interleaved-store-i32-stride-6.ll  |   694 +-
 .../vector-interleaved-store-i64-stride-5.ll  |   324 +-
 .../vector-interleaved-store-i64-stride-7.ll  | 19794 ++++++++--------
 .../vector-interleaved-store-i64-stride-8.ll  |  5971 ++---
 .../vector-interleaved-store-i8-stride-7.ll   |   185 +-
 .../vector-interleaved-store-i8-stride-8.ll   |   640 +-
 llvm/test/CodeGen/X86/vector-mul.ll           |    22 +-
 .../CodeGen/X86/vector-popcnt-128-ult-ugt.ll  |   546 +-
 llvm/test/CodeGen/X86/vector-popcnt-128.ll    |    12 +-
 .../CodeGen/X86/vector-popcnt-256-ult-ugt.ll  |   546 +-
 llvm/test/CodeGen/X86/vector-popcnt-256.ll    |    12 +-
 .../CodeGen/X86/vector-reduce-and-bool.ll     |   232 +-
 .../test/CodeGen/X86/vector-reduce-or-bool.ll |   237 +-
 llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll |    92 +-
 llvm/test/CodeGen/X86/vector-rotate-256.ll    |     4 +-
 llvm/test/CodeGen/X86/vector-rotate-512.ll    |     4 +-
 .../test/CodeGen/X86/vector-shuffle-256-v8.ll |    16 +-
 .../test/CodeGen/X86/vector-shuffle-512-v8.ll |     4 +-
 .../test/CodeGen/X86/vector-shuffle-avx512.ll |    17 +-
 llvm/test/CodeGen/X86/widen_bitcnt.ll         |    14 +-
 llvm/test/CodeGen/X86/widen_fadd.ll           |    11 +-
 llvm/test/CodeGen/X86/widen_fdiv.ll           |    11 +-
 llvm/test/CodeGen/X86/widen_fmul.ll           |    11 +-
 llvm/test/CodeGen/X86/widen_fsub.ll           |    11 +-
 .../CodeGen/X86/zero_extend_vector_inreg.ll   |    16 +-
 .../zero_extend_vector_inreg_of_broadcast.ll  |    12 +-
 ...d_vector_inreg_of_broadcast_from_memory.ll |    14 +-
 78 files changed, 28601 insertions(+), 28645 deletions(-)

diff --git a/llvm/lib/Target/X86/X86FixupVectorConstants.cpp b/llvm/lib/Target/X86/X86FixupVectorConstants.cpp
index 99e92bbcf996d..15ae1272ab558 100644
--- a/llvm/lib/Target/X86/X86FixupVectorConstants.cpp
+++ b/llvm/lib/Target/X86/X86FixupVectorConstants.cpp
@@ -293,6 +293,18 @@ bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
   case X86::VMOVUPSZ128rm:
     return ConvertToBroadcast(0, 0, X86::VMOVDDUPZ128rm,
                               X86::VBROADCASTSSZ128rm, 0, 0, 1);
+  case X86::VMOVAPDZ128rmk:
+  case X86::VMOVUPDZ128rmk:
+    return ConvertToBroadcast(0, 0, X86::VMOVDDUPZ128rmk, 0, 0, 0, 3);
+  case X86::VMOVAPSZ128rmk:
+  case X86::VMOVUPSZ128rmk:
+    return ConvertToBroadcast(0, 0, 0, X86::VBROADCASTSSZ128rmk, 0, 0, 3);
+  case X86::VMOVAPDZ128rmkz:
+  case X86::VMOVUPDZ128rmkz:
+    return ConvertToBroadcast(0, 0, X86::VMOVDDUPZ128rmkz, 0, 0, 0, 2);
+  case X86::VMOVAPSZ128rmkz:
+  case X86::VMOVUPSZ128rmkz:
+    return ConvertToBroadcast(0, 0, 0, X86::VBROADCASTSSZ128rmkz, 0, 0, 2);
   case X86::VMOVAPDZ256rm:
   case X86::VMOVAPSZ256rm:
   case X86::VMOVUPDZ256rm:
@@ -300,6 +312,18 @@ bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
     return ConvertToBroadcast(0, X86::VBROADCASTF32X4Z256rm,
                               X86::VBROADCASTSDZ256rm, X86::VBROADCASTSSZ256rm,
                               0, 0, 1);
+  case X86::VMOVAPDZ256rmk:
+  case X86::VMOVUPDZ256rmk:
+    return ConvertToBroadcast(0, 0, X86::VBROADCASTSDZ256rmk, 0, 0, 0, 3);
+  case X86::VMOVAPSZ256rmk:
+  case X86::VMOVUPSZ256rmk:
+    return ConvertToBroadcast(0, 0, 0, X86::VBROADCASTSSZ256rmk, 0, 0, 3);
+  case X86::VMOVAPDZ256rmkz:
+  case X86::VMOVUPDZ256rmkz:
+    return ConvertToBroadcast(0, 0, X86::VBROADCASTSDZ256rmkz, 0, 0, 0, 2);
+  case X86::VMOVAPSZ256rmkz:
+  case X86::VMOVUPSZ256rmkz:
+    return ConvertToBroadcast(0, 0, 0, X86::VBROADCASTSSZ256rmkz, 0, 0, 2);
   case X86::VMOVAPDZrm:
   case X86::VMOVAPSZrm:
   case X86::VMOVUPDZrm:
@@ -307,6 +331,18 @@ bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
     return ConvertToBroadcast(X86::VBROADCASTF64X4rm, X86::VBROADCASTF32X4rm,
                               X86::VBROADCASTSDZrm, X86::VBROADCASTSSZrm, 0, 0,
                               1);
+  case X86::VMOVAPDZrmk:
+  case X86::VMOVUPDZrmk:
+    return ConvertToBroadcast(0, 0, X86::VBROADCASTSDZrmk, 0, 0, 0, 3);
+  case X86::VMOVAPSZrmk:
+  case X86::VMOVUPSZrmk:
+    return ConvertToBroadcast(0, 0, 0, X86::VBROADCASTSSZrmk, 0, 0, 3);
+  case X86::VMOVAPDZrmkz:
+  case X86::VMOVUPDZrmkz:
+    return ConvertToBroadcast(0, 0, X86::VBROADCASTSDZrmkz, 0, 0, 0, 2);
+  case X86::VMOVAPSZrmkz:
+  case X86::VMOVUPSZrmkz:
+    return ConvertToBroadcast(0, 0, 0, X86::VBROADCASTSSZrmkz, 0, 0, 2);
     /* Integer Loads */
   case X86::VMOVDQArm:
   case X86::VMOVDQUrm:
@@ -331,6 +367,18 @@ bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
                               X86::VPBROADCASTDZ128rm,
                               HasBWI ? X86::VPBROADCASTWZ128rm : 0,
                               HasBWI ? X86::VPBROADCASTBZ128rm : 0, 1);
+  case X86::VMOVDQA32Z128rmk:
+  case X86::VMOVDQU32Z128rmk:
+    return ConvertToBroadcast(0, 0, 0, X86::VPBROADCASTDZ128rmk, 0, 0, 3);
+  case X86::VMOVDQA32Z128rmkz:
+  case X86::VMOVDQU32Z128rmkz:
+    return ConvertToBroadcast(0, 0, 0, X86::VPBROADCASTDZ128rmkz, 0, 0, 2);
+  case X86::VMOVDQA64Z128rmk:
+  case X86::VMOVDQU64Z128rmk:
+    return ConvertToBroadcast(0, 0, X86::VPBROADCASTQZ128rmk, 0, 0, 0, 3);
+  case X86::VMOVDQA64Z128rmkz:
+  case X86::VMOVDQU64Z128rmkz:
+    return ConvertToBroadcast(0, 0, X86::VPBROADCASTQZ128rmkz, 0, 0, 0, 2);
   case X86::VMOVDQA32Z256rm:
   case X86::VMOVDQA64Z256rm:
   case X86::VMOVDQU32Z256rm:
@@ -339,6 +387,24 @@ bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
                               X86::VPBROADCASTQZ256rm, X86::VPBROADCASTDZ256rm,
                               HasBWI ? X86::VPBROADCASTWZ256rm : 0,
                               HasBWI ? X86::VPBROADCASTBZ256rm : 0, 1);
+  case X86::VMOVDQA32Z256rmk:
+  case X86::VMOVDQU32Z256rmk:
+    return ConvertToBroadcast(0, X86::VBROADCASTI32X4Z256rmk,
+                              HasDQI ? X86::VBROADCASTI32X2Z256rmk : 0,
+                              X86::VPBROADCASTDZ256rmk, 0, 0, 3);
+  case X86::VMOVDQA32Z256rmkz:
+  case X86::VMOVDQU32Z256rmkz:
+    return ConvertToBroadcast(0, X86::VBROADCASTI32X4Z256rmkz,
+                              HasDQI ? X86::VBROADCASTI32X2Z256rmkz : 0,
+                              X86::VPBROADCASTDZ256rmkz, 0, 0, 2);
+  case X86::VMOVDQA64Z256rmk:
+  case X86::VMOVDQU64Z256rmk:
+    return ConvertToBroadcast(0, HasDQI ? X86::VBROADCASTI64X2Z128rmk : 0,
+                              X86::VPBROADCASTQZ256rmk, 0, 0, 0, 3);
+  case X86::VMOVDQA64Z256rmkz:
+  case X86::VMOVDQU64Z256rmkz:
+    return ConvertToBroadcast(0, HasDQI ? X86::VBROADCASTI64X2Z128rmkz : 0,
+                              X86::VPBROADCASTQZ256rmkz, 0, 0, 0, 2);
   case X86::VMOVDQA32Zrm:
   case X86::VMOVDQA64Zrm:
   case X86::VMOVDQU32Zrm:
@@ -347,39 +413,62 @@ bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
                               X86::VPBROADCASTQZrm, X86::VPBROADCASTDZrm,
                               HasBWI ? X86::VPBROADCASTWZrm : 0,
                               HasBWI ? X86::VPBROADCASTBZrm : 0, 1);
+  case X86::VMOVDQA32Zrmk:
+  case X86::VMOVDQU32Zrmk:
+    return ConvertToBroadcast(
+        HasDQI ? X86::VBROADCASTI32X8rmk : 0, X86::VBROADCASTI32X4rmk,
+        HasDQI ? X86::VBROADCASTI32X2Zrmk : 0, X86::VPBROADCASTDZrmk, 0, 0, 3);
+  case X86::VMOVDQA32Zrmkz:
+  case X86::VMOVDQU32Zrmkz:
+    return ConvertToBroadcast(HasDQI ? X86::VBROADCASTI32X8rmkz : 0,
+                              X86::VBROADCASTI32X4rmkz,
+                              HasDQI ? X86::VBROADCASTI32X2Zrmkz : 0,
+                              X86::VPBROADCASTDZrmkz, 0, 0, 2);
+  case X86::VMOVDQA64Zrmk:
+  case X86::VMOVDQU64Zrmk:
+    return ConvertToBroadcast(X86::VBROADCASTI64X4rmk,
+                              HasDQI ? X86::VBROADCASTI64X2rmk : 0,
+                              X86::VPBROADCASTQZrmk, 0, 0, 0, 3);
+  case X86::VMOVDQA64Zrmkz:
+  case X86::VMOVDQU64Zrmkz:
+    return ConvertToBroadcast(X86::VBROADCASTI64X4rmkz,
+                              HasDQI ? X86::VBROADCASTI64X2rmkz : 0,
+                              X86::VPBROADCASTQZrmkz, 0, 0, 0, 2);
   }
 
-  auto ConvertToBroadcastAVX512 = [&](unsigned OpSrc32, unsigned OpSrc64) {
-    unsigned OpBcst32 = 0, OpBcst64 = 0;
-    unsigned OpNoBcst32 = 0, OpNoBcst64 = 0;
+  auto ConvertToBroadcastAVX512 = [&](unsigned OpSrc16, unsigned OpSrc32,
+                                      unsigned OpSrc64) {
+    if (OpSrc16) {
+      if (const X86FoldTableEntry *Mem2Bcst =
+              llvm::lookupBroadcastFoldTable(OpSrc16, 16)) {
+        if (ConvertToBroadcast(0, 0, 0, 0, Mem2Bcst->DstOp, 0,
+                               Mem2Bcst->Flags & TB_INDEX_MASK))
+          return true;
+      }
+    }
     if (OpSrc32) {
       if (const X86FoldTableEntry *Mem2Bcst =
               llvm::lookupBroadcastFoldTable(OpSrc32, 32)) {
-        OpBcst32 = Mem2Bcst->DstOp;
-        OpNoBcst32 = Mem2Bcst->Flags & TB_INDEX_MASK;
+        if (ConvertToBroadcast(0, 0, 0, Mem2Bcst->DstOp, 0, 0,
+                               Mem2Bcst->Flags & TB_INDEX_MASK))
+          return true;
       }
     }
     if (OpSrc64) {
       if (const X86FoldTableEntry *Mem2Bcst =
               llvm::lookupBroadcastFoldTable(OpSrc64, 64)) {
-        OpBcst64 = Mem2Bcst->DstOp;
-        OpNoBcst64 = Mem2Bcst->Flags & TB_INDEX_MASK;
+        if (ConvertToBroadcast(0, 0, Mem2Bcst->DstOp, 0, 0, 0,
+                               Mem2Bcst->Flags & TB_INDEX_MASK))
+          return true;
       }
     }
-    assert(((OpBcst32 == 0) || (OpBcst64 == 0) || (OpNoBcst32 == OpNoBcst64)) &&
-           "OperandNo mismatch");
-
-    if (OpBcst32 || OpBcst64) {
-      unsigned OpNo = OpBcst32 == 0 ? OpNoBcst64 : OpNoBcst32;
-      return ConvertToBroadcast(0, 0, OpBcst64, OpBcst32, 0, 0, OpNo);
-    }
     return false;
   };
 
   // Attempt to find a AVX512 mapping from a full width memory-fold instruction
   // to a broadcast-fold instruction variant.
   if ((MI.getDesc().TSFlags & X86II::EncodingMask) == X86II::EVEX)
-    return ConvertToBroadcastAVX512(Opc, Opc);
+    return ConvertToBroadcastAVX512(Opc, Opc, Opc);
 
   // Reverse the X86InstrInfo::setExecutionDomainCustom EVEX->VEX logic
   // conversion to see if we can convert to a broadcasted (integer) logic op.
@@ -436,7 +525,7 @@ bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
       break;
     }
     if (OpSrc32 || OpSrc64)
-      return ConvertToBroadcastAVX512(OpSrc32, OpSrc64);
+      return ConvertToBroadcastAVX512(0, OpSrc32, OpSrc64);
   }
 
   return false;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f7fe82fdf08a8..8bf55ea4b0355 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7114,6 +7114,14 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
          "Unsupported vector type for broadcast.");
 
+  // On AVX512VL targets we're better off keeping the full width constant load
+  // and letting X86FixupVectorConstantsPass handle conversion to
+  // broadcast/broadcast-fold.
+  // AVX512 targets without AVX512VL can do this only for 512-bit vectors.
+  if (Subtarget.hasAVX512() && (Subtarget.hasVLX() || VT.is512BitVector()) &&
+      BVOp->isConstant())
+    return SDValue();
+
   // See if the build vector is a repeating sequence of scalars (inc. splat).
   SDValue Ld;
   BitVector UndefElements;
diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
index c9d0f66c6e46b..d75ab699dfbb6 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
@@ -297,6 +297,8 @@ static bool matchBroadcastSize(const X86FoldTableEntry &Entry,
   case TB_BCAST_SS:
   case TB_BCAST_D:
     return BroadcastBits == 32;
+  case TB_BCAST_SH:
+    return BroadcastBits == 16;
   }
   return false;
 }
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index d61e33ccb22a9..54db79ce47bb6 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -1482,10 +1482,8 @@ define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %i
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpternlogq $202, %ymm0, %ymm1, %ymm2
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm0
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -1497,10 +1495,8 @@ define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %i
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vpternlogq $202, %ymm0, %ymm1, %ymm2
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm0
+; AVX512DQ-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
@@ -3263,10 +3259,8 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX512F-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX512F-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1]
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm3
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm3, %ymm1
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
 ; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa %ymm0, 32(%rcx)
 ; AVX512F-NEXT:    vmovdqa %ymm1, (%rcx)
@@ -3280,10 +3274,8 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512DQ-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX512DQ-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1]
-; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm3
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm3, %ymm1
+; AVX512DQ-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
 ; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vmovdqa %ymm0, 32(%rcx)
 ; AVX512DQ-NEXT:    vmovdqa %ymm1, (%rcx)
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index 6d5f8a78cb1d7..37677aeeb5212 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1213,10 +1213,9 @@ define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %i
 ;
 ; AVX512F-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpternlogq $172, 32(%rdi), %ymm0, %ymm1
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
 ; AVX512F-NEXT:    vpaddb (%rsi), %ymm1, %ymm0
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
@@ -1224,10 +1223,9 @@ define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %i
 ;
 ; AVX512DQ-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2:
 ; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vpternlogq $172, 32(%rdi), %ymm0, %ymm1
+; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX512DQ-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
 ; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm1, %ymm0
 ; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rdx)
 ; AVX512DQ-NEXT:    vzeroupper
@@ -2629,10 +2627,8 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm0
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
 ; AVX512F-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[0,1,0,1]
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpternlogq $202, %ymm2, %ymm0, %ymm3
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm3, %ymm0
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512F-NEXT:    vpaddb (%rsi), %ymm2, %ymm0
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX512F-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rdx)
@@ -2644,10 +2640,8 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512DQ-NEXT:    vmovdqa 48(%rdi), %xmm0
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %ymm1
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[0,1,0,1]
-; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vpternlogq $202, %ymm2, %ymm0, %ymm3
-; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm3, %ymm0
+; AVX512DQ-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm2, %ymm0
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX512DQ-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rdx)
diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index ec0f14ae4e58e..32b9c22497019 100644
--- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -3671,10 +3671,9 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask1(ptr %vp, <4
 define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask2(ptr %vp, <4 x float> %vec2, <4 x float> %mask) {
 ; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovddup {{.*#+}} xmm2 = [4,14,4,14]
-; CHECK-NEXT:    # xmm2 = mem[0,0]
-; CHECK-NEXT:    vmovaps 32(%rdi), %ymm3
-; CHECK-NEXT:    vpermt2ps (%rdi), %ymm2, %ymm3
+; CHECK-NEXT:    vmovaps 32(%rdi), %ymm2
+; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm3 = [4,14,4,14,4,14,4,14]
+; CHECK-NEXT:    vpermi2ps (%rdi), %ymm2, %ymm3
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vcmpeqps %xmm2, %xmm1, %k1
 ; CHECK-NEXT:    vmovaps %xmm3, %xmm0 {%k1}
@@ -3690,12 +3689,11 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask2(ptr %vp, <4 x
 define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2(ptr %vp, <4 x float> %mask) {
 ; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovddup {{.*#+}} xmm2 = [4,14,4,14]
-; CHECK-NEXT:    # xmm2 = mem[0,0]
-; CHECK-NEXT:    vmovaps 32(%rdi), %ymm1
+; CHECK-NEXT:    vmovaps 32(%rdi), %ymm2
+; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [4,14,4,14,4,14,4,14]
 ; CHECK-NEXT:    vxorps %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vcmpeqps %xmm3, %xmm0, %k1
-; CHECK-NEXT:    vpermt2ps (%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT:    vpermi2ps (%rdi), %ymm2, %ymm1 {%k1} {z}
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/avx512fp16-arith.ll b/llvm/test/CodeGen/X86/avx512fp16-arith.ll
index 8d811d8d29e06..192ef3ef20e31 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-arith.ll
@@ -297,8 +297,7 @@ entry:
 define half @fneg(half %x) {
 ; CHECK-LABEL: fneg:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a = fneg half %x
   ret half %a
@@ -307,8 +306,7 @@ define half @fneg(half %x) {
 define half @fneg_idiom(half %x) {
 ; CHECK-LABEL: fneg_idiom:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a = fsub half -0.0, %x
   ret half %a
@@ -317,8 +315,7 @@ define half @fneg_idiom(half %x) {
 define half @fabs(half %x) {
 ; CHECK-LABEL: fabs:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a = call half @llvm.fabs.f16(half %x)
   ret half %a
@@ -328,8 +325,7 @@ declare half @llvm.fabs.f16(half)
 define half @fcopysign(half %x, half %y) {
 ; CHECK-LABEL: fcopysign:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    vpternlogd $226, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %a = call half @llvm.copysign.f16(half %x, half %y)
   ret half %a
@@ -339,10 +335,9 @@ declare half @llvm.copysign.f16(half, half)
 define half @fround(half %x) {
 ; CHECK-LABEL: fround:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
-; CHECK-NEXT:    vpternlogq $248, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT:    vaddsh %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
+; CHECK-NEXT:    vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
+; CHECK-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vrndscalesh $11, %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a = call half @llvm.round.f16(half %x)
@@ -353,8 +348,7 @@ declare half @llvm.round.f16(half)
 define <8 x half> @fnegv8f16(<8 x half> %x) {
 ; CHECK-LABEL: fnegv8f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a = fneg <8 x half> %x
   ret <8 x half> %a
@@ -363,8 +357,7 @@ define <8 x half> @fnegv8f16(<8 x half> %x) {
 define <8 x half> @fneg_idiomv8f16(<8 x half> %x) {
 ; CHECK-LABEL: fneg_idiomv8f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a = fsub <8 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %x
   ret <8 x half> %a
@@ -373,8 +366,7 @@ define <8 x half> @fneg_idiomv8f16(<8 x half> %x) {
 define <8 x half> @fabsv8f16(<8 x half> %x) {
 ; CHECK-LABEL: fabsv8f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a = call <8 x half> @llvm.fabs.v8f16(<8 x half> %x)
   ret <8 x half> %a
@@ -394,10 +386,9 @@ declare <8 x half> @llvm.copysign.v8f16(<8 x half>, <8 x half>)
 define <8 x half> @roundv8f16(<8 x half> %x) {
 ; CHECK-LABEL: roundv8f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
-; CHECK-NEXT:    vpternlogq $248, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT:    vaddph %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
+; CHECK-NEXT:    vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
+; CHECK-NEXT:    vaddph %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    vrndscaleph $11, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %a = call <8 x half> @llvm.round.v8f16(<8 x half> %x)
@@ -408,8 +399,7 @@ declare <8 x half> @llvm.round.v8f16(<8 x half>)
 define <16 x half> @fnegv16f16(<16 x half> %x) {
 ; CHECK-LABEL: fnegv16f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %a = fneg <16 x half> %x
   ret <16 x half> %a
@@ -418,8 +408,7 @@ define <16 x half> @fnegv16f16(<16 x half> %x) {
 define <16 x half> @fneg_idiomv16f16(<16 x half> %x) {
 ; CHECK-LABEL: fneg_idiomv16f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %a = fsub <16 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %x
   ret <16 x half> %a
@@ -428,8 +417,7 @@ define <16 x half> @fneg_idiomv16f16(<16 x half> %x) {
 define <16 x half> @fabsv16f16(<16 x half> %x) {
 ; CHECK-LABEL: fabsv16f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %a = call <16 x half> @llvm.fabs.v16f16(<16 x half> %x)
   ret <16 x half> %a
@@ -449,10 +437,9 @@ declare <16 x half> @llvm.copysign.v16f16(<16 x half>, <16 x half>)
 define <16 x half> @roundv16f16(<16 x half> %x) {
 ; CHECK-LABEL: roundv16f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} ymm2 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
-; CHECK-NEXT:    vpternlogq $248, %ymm1, %ymm0, %ymm2
-; CHECK-NEXT:    vaddph %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
+; CHECK-NEXT:    vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1
+; CHECK-NEXT:    vaddph %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vrndscaleph $11, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %a = call <16 x half> @llvm.round.v16f16(<16 x half> %x)
@@ -463,8 +450,7 @@ declare <16 x half> @llvm.round.v16f16(<16 x half>)
 define <32 x half> @fnegv32f16(<32 x half> %x) {
 ; CHECK-LABEL: fnegv32f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} zmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %a = fneg <32 x half> %x
   ret <32 x half> %a
@@ -473,8 +459,7 @@ define <32 x half> @fnegv32f16(<32 x half> %x) {
 define <32 x half> @fneg_idiomv32f16(<32 x half> %x) {
 ; CHECK-LABEL: fneg_idiomv32f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} zmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %a = fsub <32 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %x
   ret <32 x half> %a
@@ -483,8 +468,7 @@ define <32 x half> @fneg_idiomv32f16(<32 x half> %x) {
 define <32 x half> @fabsv32f16(<32 x half> %x) {
 ; CHECK-LABEL: fabsv32f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} zmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    vpandq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %a = call <32 x half> @llvm.fabs.v32f16(<32 x half> %x)
   ret <32 x half> %a
@@ -504,10 +488,9 @@ declare <32 x half> @llvm.copysign.v32f16(<32 x half>, <32 x half>)
 define <32 x half> @roundv32f16(<32 x half> %x) {
 ; CHECK-LABEL: roundv32f16:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} zmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} zmm2 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
-; CHECK-NEXT:    vpternlogq $248, %zmm1, %zmm0, %zmm2
-; CHECK-NEXT:    vaddph %zmm2, %zmm0, %zmm0
+; CHECK-NEXT:    vpbroadcastw {{.*#+}} zmm1 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
+; CHECK-NEXT:    vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm1
+; CHECK-NEXT:    vaddph %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    vrndscaleph $11, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %a = call <32 x half> @llvm.round.v32f16(<32 x half> %x)
diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
index 946029ae921c0..f6b431ca79076 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
@@ -69,8 +69,7 @@ entry:
 define dso_local <32 x half> @test5(<32 x half> noundef %a, <32 x half> noundef %b) local_unnamed_addr #0 {
 ; CHECK-LABEL: test5:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} zmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxorq %zmm2, %zmm1, %zmm2
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm2
 ; CHECK-NEXT:    vfmulcph %zmm2, %zmm0, %zmm1
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm0
 ; CHECK-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll b/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
index e2ea8974f6551..fad302d4d70c4 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-fp-logic.ll
@@ -211,8 +211,7 @@ define half @movmsk(half %x) {
 define half @bitcast_fabs(half %x) {
 ; CHECK-LABEL: bitcast_fabs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast half %x to i16
   %and = and i16 %bc1, 32767
@@ -223,8 +222,7 @@ define half @bitcast_fabs(half %x) {
 define half @bitcast_fneg(half %x) {
 ; CHECK-LABEL: bitcast_fneg:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast half %x to i16
   %xor = xor i16 %bc1, 32768
@@ -235,8 +233,7 @@ define half @bitcast_fneg(half %x) {
 define <8 x half> @bitcast_fabs_vec(<8 x half> %x) {
 ; CHECK-LABEL: bitcast_fabs_vec:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <8 x half> %x to <8 x i16>
   %and = and <8 x i16> %bc1, <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>
@@ -247,8 +244,7 @@ define <8 x half> @bitcast_fabs_vec(<8 x half> %x) {
 define <8 x half> @bitcast_fneg_vec(<8 x half> %x) {
 ; CHECK-LABEL: bitcast_fneg_vec:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <8 x half> %x to <8 x i16>
   %xor = xor <8 x i16> %bc1, <i16 32768, i16 32768, i16 32768, i16 32768, i16 32768, i16 32768, i16 32768, i16 32768>
@@ -285,8 +281,7 @@ define half @fsub_bitcast_fneg(half %x, half %y) {
 define half @nabs(half %a) {
 ; CHECK-LABEL: nabs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %conv = bitcast half %a to i16
   %and = or i16 %conv, -32768
@@ -297,8 +292,7 @@ define half @nabs(half %a) {
 define <8 x half> @nabsv8f16(<8 x half> %a) {
 ; CHECK-LABEL: nabsv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; CHECK-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %conv = bitcast <8 x half> %a to <8 x i16>
   %and = or <8 x i16> %conv, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
@@ -357,7 +351,7 @@ define <8 x half> @fsub_bitcast_fneg_vec_undef_elts(<8 x half> %x, <8 x half> %y
 define <8 x half> @fadd_bitcast_fneg_vec_width(<8 x half> %x, <8 x half> %y) {
 ; CHECK-LABEL: fadd_bitcast_fneg_vec_width:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm1
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddph %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <8 x half> %y to <2 x i64>
@@ -370,7 +364,7 @@ define <8 x half> @fadd_bitcast_fneg_vec_width(<8 x half> %x, <8 x half> %y) {
 define <8 x half> @fsub_bitcast_fneg_vec_width(<8 x half> %x, <8 x half> %y) {
 ; CHECK-LABEL: fsub_bitcast_fneg_vec_width:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm1
+; CHECK-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
 ; CHECK-NEXT:    vsubph %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %bc1 = bitcast <8 x half> %y to <2 x i64>
diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
index 501e73c46af9c..16eaa33a55628 100644
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -60,18 +60,11 @@ define i1 @trunc_v2i64_cmp(<2 x i64> %a0) nounwind {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX12-LABEL: trunc_v2i64_cmp:
-; AVX12:       # %bb.0:
-; AVX12-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX12-NEXT:    sete %al
-; AVX12-NEXT:    retq
-;
-; AVX512-LABEL: trunc_v2i64_cmp:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [1,1]
-; AVX512-NEXT:    vptest %xmm1, %xmm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    retq
+; AVX-LABEL: trunc_v2i64_cmp:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %1 = trunc <2 x i64> %a0 to <2 x i1>
   %2 = bitcast <2 x i1> %1 to i2
   %3 = icmp eq i2 %2, 0
@@ -121,18 +114,11 @@ define i1 @trunc_v4i32_cmp(<4 x i32> %a0) nounwind {
 ; SSE41-NEXT:    setb %al
 ; SSE41-NEXT:    retq
 ;
-; AVX12-LABEL: trunc_v4i32_cmp:
-; AVX12:       # %bb.0:
-; AVX12-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX12-NEXT:    setb %al
-; AVX12-NEXT:    retq
-;
-; AVX512-LABEL: trunc_v4i32_cmp:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [4294967297,4294967297]
-; AVX512-NEXT:    vptest %xmm1, %xmm0
-; AVX512-NEXT:    setb %al
-; AVX512-NEXT:    retq
+; AVX-LABEL: trunc_v4i32_cmp:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    retq
   %1 = trunc <4 x i32> %a0 to <4 x i1>
   %2 = bitcast <4 x i1> %1 to i4
   %3 = icmp eq i4 %2, -1
@@ -195,18 +181,11 @@ define i1 @trunc_v8i16_cmp(<8 x i16> %a0) nounwind {
 ; SSE41-NEXT:    setne %al
 ; SSE41-NEXT:    retq
 ;
-; AVX12-LABEL: trunc_v8i16_cmp:
-; AVX12:       # %bb.0:
-; AVX12-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX12-NEXT:    setne %al
-; AVX12-NEXT:    retq
-;
-; AVX512-LABEL: trunc_v8i16_cmp:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [281479271743489,281479271743489]
-; AVX512-NEXT:    vptest %xmm1, %xmm0
-; AVX512-NEXT:    setne %al
-; AVX512-NEXT:    retq
+; AVX-LABEL: trunc_v8i16_cmp:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setne %al
+; AVX-NEXT:    retq
   %1 = trunc <8 x i16> %a0 to <8 x i1>
   %2 = bitcast <8 x i1> %1 to i8
   %3 = icmp ne i8 %2, 0
@@ -264,18 +243,11 @@ define i1 @trunc_v16i8_cmp(<16 x i8> %a0) nounwind {
 ; SSE41-NEXT:    setae %al
 ; SSE41-NEXT:    retq
 ;
-; AVX12-LABEL: trunc_v16i8_cmp:
-; AVX12:       # %bb.0:
-; AVX12-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX12-NEXT:    setae %al
-; AVX12-NEXT:    retq
-;
-; AVX512-LABEL: trunc_v16i8_cmp:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [72340172838076673,72340172838076673]
-; AVX512-NEXT:    vptest %xmm1, %xmm0
-; AVX512-NEXT:    setae %al
-; AVX512-NEXT:    retq
+; AVX-LABEL: trunc_v16i8_cmp:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setae %al
+; AVX-NEXT:    retq
   %1 = trunc <16 x i8> %a0 to <16 x i1>
   %2 = bitcast <16 x i1> %1 to i16
   %3 = icmp ne i16 %2, -1
@@ -350,8 +322,7 @@ define i1 @trunc_v4i64_cmp(<4 x i64> %a0) nounwind {
 ;
 ; AVX512-LABEL: trunc_v4i64_cmp:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
+; AVX512-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
 ; AVX512-NEXT:    setne %al
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -426,8 +397,7 @@ define i1 @trunc_v8i132_cmp(<8 x i32> %a0) nounwind {
 ;
 ; AVX512-LABEL: trunc_v8i132_cmp:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
+; AVX512-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
 ; AVX512-NEXT:    setae %al
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -524,8 +494,7 @@ define i1 @trunc_v16i16_cmp(<16 x i16> %a0) nounwind {
 ;
 ; AVX512-LABEL: trunc_v16i16_cmp:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
+; AVX512-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
 ; AVX512-NEXT:    sete %al
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -616,8 +585,7 @@ define i1 @trunc_v32i8_cmp(<32 x i8> %a0) nounwind {
 ;
 ; AVX512-LABEL: trunc_v32i8_cmp:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
+; AVX512-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
 ; AVX512-NEXT:    setb %al
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
index 13f1451bbc8b0..9aa90de654a44 100644
--- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -1955,13 +1955,6 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
 }
 
 
-; AVX512:       .LCPI37
-; AVX512-NEXT:  .quad	0x4010000000000000      # double 4
-; AVX512-NEXT:  .quad	0x3ff0000000000000      # double 1
-; AVX512-NEXT:  .quad	0x4000000000000000      # double 2
-; AVX512-NEXT:  .quad	0x4008000000000000      # double 3
-; AVX512-NOT:   .quad
-
 define <8 x double> @f8xf64_f256(<8 x double> %a) {
 ; AVX-LABEL: f8xf64_f256:
 ; AVX:       # %bb.0:
diff --git a/llvm/test/CodeGen/X86/combine-bitselect.ll b/llvm/test/CodeGen/X86/combine-bitselect.ll
index 6c266be808eaf..37e8a36bba9a9 100644
--- a/llvm/test/CodeGen/X86/combine-bitselect.ll
+++ b/llvm/test/CodeGen/X86/combine-bitselect.ll
@@ -1164,7 +1164,7 @@ define void @constantfold_andn_mask() nounwind {
 ; AVX512VL-NEXT:    pushq %rax
 ; AVX512VL-NEXT:    callq use at PLT
 ; AVX512VL-NEXT:    vmovdqu (%rax), %xmm1
-; AVX512VL-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [31,248,31,248,31,248,31,248,31,248,31,248,31,248,31,248]
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,248,31,248,31,248,31,248,31,248,31,248,31,248,31,248]
 ; AVX512VL-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpavgb %xmm2, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpandn %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/combine-pmuldq.ll b/llvm/test/CodeGen/X86/combine-pmuldq.ll
index e1d963ad1ec99..81e6c534cd90e 100644
--- a/llvm/test/CodeGen/X86/combine-pmuldq.ll
+++ b/llvm/test/CodeGen/X86/combine-pmuldq.ll
@@ -284,8 +284,7 @@ define i32 @PR43159(ptr %a0) {
 ; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; AVX512VL-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
 ; AVX512VL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
@@ -307,8 +306,7 @@ define i32 @PR43159(ptr %a0) {
 ; AVX512DQVL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
 ; AVX512DQVL-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
 ; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX512DQVL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; AVX512DQVL-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; AVX512DQVL-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
 ; AVX512DQVL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512DQVL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
 ; AVX512DQVL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 549fe72626973..df8646614b447 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -974,9 +974,7 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
 ; AVX512F-NEXT:    vpsravd %zmm3, %zmm2, %zmm2
 ; AVX512F-NEXT:    vpmovdw %zmm2, %ymm2
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpternlogq $216, %zmm2, %zmm1, %zmm0
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
diff --git a/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll b/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
index 018b6c2d20f1e..4f290acb81d24 100644
--- a/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
@@ -257,7 +257,7 @@ define void @vp_fabs_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
 ; AVX512-LABEL: vp_fabs_v4f32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    vmovaps %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
   %res = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %a0, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
   store <4 x float> %res, ptr %out
@@ -320,7 +320,7 @@ define void @vp_fneg_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
 ; AVX512-LABEL: vp_fneg_v4f32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    vmovaps %xmm0, (%rdi)
 ; AVX512-NEXT:    retq
   %res = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a0, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
   store <4 x float> %res, ptr %out
diff --git a/llvm/test/CodeGen/X86/fp-round.ll b/llvm/test/CodeGen/X86/fp-round.ll
index 8efd5819a6d22..acfc88e5bac2b 100644
--- a/llvm/test/CodeGen/X86/fp-round.ll
+++ b/llvm/test/CodeGen/X86/fp-round.ll
@@ -65,10 +65,9 @@ define half @round_f16(half %h) {
 ;
 ; AVX512FP16-LABEL: round_f16:
 ; AVX512FP16:       ## %bb.0: ## %entry
-; AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
-; AVX512FP16-NEXT:    vpternlogq $248, %xmm1, %xmm0, %xmm2
-; AVX512FP16-NEXT:    vaddsh %xmm2, %xmm0, %xmm0
+; AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1,4.9976E-1]
+; AVX512FP16-NEXT:    vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
+; AVX512FP16-NEXT:    vaddsh %xmm1, %xmm0, %xmm0
 ; AVX512FP16-NEXT:    vrndscalesh $11, %xmm0, %xmm0, %xmm0
 ; AVX512FP16-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/icmp-abs-C-vec.ll b/llvm/test/CodeGen/X86/icmp-abs-C-vec.ll
index 1e50e5b811233..22b1bde9a2366 100644
--- a/llvm/test/CodeGen/X86/icmp-abs-C-vec.ll
+++ b/llvm/test/CodeGen/X86/icmp-abs-C-vec.ll
@@ -154,8 +154,7 @@ define <4 x i64> @illegal_abs_to_eq_or_sext(<4 x i64> %x) {
 ; AVX512-LABEL: illegal_abs_to_eq_or_sext:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsq %ymm0, %ymm0
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [129,129,129,129]
-; AVX512-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    retq
 ;
 ; AVX2-LABEL: illegal_abs_to_eq_or_sext:
@@ -279,8 +278,7 @@ define <4 x i64> @illegal_abs_to_ne_and_sext(<4 x i64> %x) {
 ; AVX512-LABEL: illegal_abs_to_ne_and_sext:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsq %ymm0, %ymm0
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [129,129,129,129]
-; AVX512-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    vpternlogq $15, %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
 ;
@@ -346,8 +344,7 @@ define <4 x i1> @legal_abs_eq_unchanged(<4 x i32> %x) {
 ; AVX512-LABEL: legal_abs_eq_unchanged:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [129,129,129,129]
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 ;
 ; AVX2-LABEL: legal_abs_eq_unchanged:
@@ -380,8 +377,7 @@ define <4 x i32> @legal_abs_eq_unchanged_sext(<4 x i32> %x) {
 ; AVX512-LABEL: legal_abs_eq_unchanged_sext:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [129,129,129,129]
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 ;
 ; AVX2-LABEL: legal_abs_eq_unchanged_sext:
@@ -415,8 +411,7 @@ define <4 x i1> @legal_abs_ne_unchangedd(<4 x i32> %x) {
 ; AVX512-LABEL: legal_abs_ne_unchangedd:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [129,129,129,129]
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 ;
@@ -456,8 +451,7 @@ define <4 x i32> @legal_abs_ne_unchangedd_sext(<4 x i32> %x) {
 ; AVX512-LABEL: legal_abs_ne_unchangedd_sext:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [129,129,129,129]
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 ;
@@ -558,9 +552,8 @@ define <4 x i1> @eq_or_to_abs_vec4x64(<4 x i64> %x) {
 define <4 x i64> @eq_or_to_abs_vec4x64_sext(<4 x i64> %x) {
 ; AVX512-LABEL: eq_or_to_abs_vec4x64_sext:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [129,129,129,129]
 ; AVX512-NEXT:    vpabsq %ymm0, %ymm0
-; AVX512-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    retq
 ;
 ; AVX2-LABEL: eq_or_to_abs_vec4x64_sext:
@@ -693,9 +686,8 @@ define <4 x i1> @ne_and_to_abs_vec4x64(<4 x i64> %x) {
 define <4 x i64> @ne_and_to_abs_vec4x64_sext(<4 x i64> %x) {
 ; AVX512-LABEL: ne_and_to_abs_vec4x64_sext:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [129,129,129,129]
 ; AVX512-NEXT:    vpabsq %ymm0, %ymm0
-; AVX512-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512-NEXT:    vpternlogq $15, %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
 ;
@@ -768,9 +760,8 @@ define <4 x i64> @ne_and_to_abs_vec4x64_sext(<4 x i64> %x) {
 define <4 x i1> @eq_or_to_abs_vec4x32(<4 x i32> %x) {
 ; AVX512-LABEL: eq_or_to_abs_vec4x32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 ;
 ; AVX2-LABEL: eq_or_to_abs_vec4x32:
@@ -803,9 +794,8 @@ define <4 x i1> @eq_or_to_abs_vec4x32(<4 x i32> %x) {
 define <4 x i32> @eq_or_to_abs_vec4x32_sext(<4 x i32> %x) {
 ; AVX512-LABEL: eq_or_to_abs_vec4x32_sext:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 ;
 ; AVX2-LABEL: eq_or_to_abs_vec4x32_sext:
@@ -839,9 +829,8 @@ define <4 x i32> @eq_or_to_abs_vec4x32_sext(<4 x i32> %x) {
 define <4 x i1> @ne_and_to_abs_vec4x32(<4 x i32> %x) {
 ; AVX512-LABEL: ne_and_to_abs_vec4x32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 ;
@@ -880,9 +869,8 @@ define <4 x i1> @ne_and_to_abs_vec4x32(<4 x i32> %x) {
 define <4 x i32> @ne_and_to_abs_vec4x32_sext(<4 x i32> %x) {
 ; AVX512-LABEL: ne_and_to_abs_vec4x32_sext:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
 ; AVX512-NEXT:    vpabsd %xmm0, %xmm0
-; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 1289eef7795dc..945830c832d54 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -547,9 +547,9 @@ define <8 x i32> @test9(ptr %base, <8 x i64> %ind1, <8 x i32>%ind5) {
 ; SKX_LARGE-NEXT:    vpbroadcastq %rdi, %zmm2
 ; SKX_LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
 ; SKX_LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; SKX_LARGE-NEXT:    vpmuldq (%rax){1to8}, %zmm1, %zmm1
+; SKX_LARGE-NEXT:    vpmuldq (%rax), %zmm1, %zmm1
 ; SKX_LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; SKX_LARGE-NEXT:    vpmullq (%rax){1to8}, %zmm0, %zmm0
+; SKX_LARGE-NEXT:    vpmullq (%rax), %zmm0, %zmm0
 ; SKX_LARGE-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
 ; SKX_LARGE-NEXT:    vpaddq %zmm1, %zmm0, %zmm1
 ; SKX_LARGE-NEXT:    kxnorw %k0, %k0, %k1
@@ -631,9 +631,9 @@ define <8 x i32> @test10(ptr %base, <8 x i64> %i1, <8 x i32>%ind5) {
 ; SKX_LARGE-NEXT:    vpbroadcastq %rdi, %zmm2
 ; SKX_LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
 ; SKX_LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; SKX_LARGE-NEXT:    vpmuldq (%rax){1to8}, %zmm1, %zmm1
+; SKX_LARGE-NEXT:    vpmuldq (%rax), %zmm1, %zmm1
 ; SKX_LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; SKX_LARGE-NEXT:    vpmullq (%rax){1to8}, %zmm0, %zmm0
+; SKX_LARGE-NEXT:    vpmullq (%rax), %zmm0, %zmm0
 ; SKX_LARGE-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
 ; SKX_LARGE-NEXT:    vpaddq %zmm1, %zmm0, %zmm1
 ; SKX_LARGE-NEXT:    kxnorw %k0, %k0, %k1
@@ -4147,7 +4147,7 @@ define <16 x float> @zext_index(ptr %base, <16 x i32> %ind) {
 ; SKX_LARGE-LABEL: zext_index:
 ; SKX_LARGE:       # %bb.0:
 ; SKX_LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; SKX_LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm1
+; SKX_LARGE-NEXT:    vandps (%rax), %zmm0, %zmm1
 ; SKX_LARGE-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_LARGE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; SKX_LARGE-NEXT:    vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
diff --git a/llvm/test/CodeGen/X86/movmsk-cmp.ll b/llvm/test/CodeGen/X86/movmsk-cmp.ll
index f26bbb7e5c2bd..881a6959497eb 100644
--- a/llvm/test/CodeGen/X86/movmsk-cmp.ll
+++ b/llvm/test/CodeGen/X86/movmsk-cmp.ll
@@ -1120,24 +1120,11 @@ define i1 @allzeros_v16i8_and1(<16 x i8> %arg) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: allzeros_v16i8_and1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; KNL-LABEL: allzeros_v16i8_and1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; KNL-NEXT:    sete %al
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: allzeros_v16i8_and1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [72340172838076673,72340172838076673]
-; SKX-NEXT:    vptest %xmm1, %xmm0
-; SKX-NEXT:    sete %al
-; SKX-NEXT:    retq
+; AVX-LABEL: allzeros_v16i8_and1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %tmp = and <16 x i8> %arg, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %tmp1 = icmp ne <16 x i8> %tmp, zeroinitializer
   %tmp2 = bitcast <16 x i1> %tmp1 to i16
@@ -1230,13 +1217,20 @@ define i1 @allzeros_v32i8_and1(<32 x i8> %arg) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: allzeros_v32i8_and1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; KNL-LABEL: allzeros_v32i8_and1:
+; KNL:       # %bb.0:
+; KNL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
+; KNL-NEXT:    vptest %ymm1, %ymm0
+; KNL-NEXT:    sete %al
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: allzeros_v32i8_and1:
+; SKX:       # %bb.0:
+; SKX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; SKX-NEXT:    sete %al
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
   %tmp = and <32 x i8> %arg, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %tmp1 = icmp ne <32 x i8> %tmp, zeroinitializer
   %tmp2 = bitcast <32 x i1> %tmp1 to i32
@@ -1416,24 +1410,11 @@ define i1 @allzeros_v8i16_and1(<8 x i16> %arg) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: allzeros_v8i16_and1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; KNL-LABEL: allzeros_v8i16_and1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; KNL-NEXT:    sete %al
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: allzeros_v8i16_and1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [281479271743489,281479271743489]
-; SKX-NEXT:    vptest %xmm1, %xmm0
-; SKX-NEXT:    sete %al
-; SKX-NEXT:    retq
+; AVX-LABEL: allzeros_v8i16_and1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %tmp = and <8 x i16> %arg, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %tmp1 = icmp ne <8 x i16> %tmp, zeroinitializer
   %tmp2 = bitcast <8 x i1> %tmp1 to i8
@@ -1656,13 +1637,20 @@ define i1 @allzeros_v16i16_and1(<16 x i16> %arg) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: allzeros_v16i16_and1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; KNL-LABEL: allzeros_v16i16_and1:
+; KNL:       # %bb.0:
+; KNL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
+; KNL-NEXT:    vptest %ymm1, %ymm0
+; KNL-NEXT:    sete %al
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: allzeros_v16i16_and1:
+; SKX:       # %bb.0:
+; SKX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; SKX-NEXT:    sete %al
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
   %tmp = and <16 x i16> %arg, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %tmp1 = icmp ne <16 x i16> %tmp, zeroinitializer
   %tmp2 = bitcast <16 x i1> %tmp1 to i16
@@ -1726,24 +1714,11 @@ define i1 @allzeros_v4i32_and1(<4 x i32> %arg) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: allzeros_v4i32_and1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; KNL-LABEL: allzeros_v4i32_and1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; KNL-NEXT:    sete %al
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: allzeros_v4i32_and1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [4294967297,4294967297]
-; SKX-NEXT:    vptest %xmm1, %xmm0
-; SKX-NEXT:    sete %al
-; SKX-NEXT:    retq
+; AVX-LABEL: allzeros_v4i32_and1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %tmp = and <4 x i32> %arg, <i32 1, i32 1, i32 1, i32 1>
   %tmp1 = icmp ne <4 x i32> %tmp, zeroinitializer
   %tmp2 = bitcast <4 x i1> %tmp1 to i4
@@ -1839,13 +1814,20 @@ define i1 @allzeros_v8i32_and1(<8 x i32> %arg) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: allzeros_v8i32_and1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; KNL-LABEL: allzeros_v8i32_and1:
+; KNL:       # %bb.0:
+; KNL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
+; KNL-NEXT:    vptest %ymm1, %ymm0
+; KNL-NEXT:    sete %al
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: allzeros_v8i32_and1:
+; SKX:       # %bb.0:
+; SKX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; SKX-NEXT:    sete %al
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
   %tmp = and <8 x i32> %arg, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %tmp1 = icmp ne <8 x i32> %tmp, zeroinitializer
   %tmp2 = bitcast <8 x i1> %tmp1 to i8
@@ -2021,24 +2003,11 @@ define i1 @allzeros_v2i64_and1(<2 x i64> %arg) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: allzeros_v2i64_and1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; KNL-LABEL: allzeros_v2i64_and1:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; KNL-NEXT:    sete %al
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: allzeros_v2i64_and1:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [1,1]
-; SKX-NEXT:    vptest %xmm1, %xmm0
-; SKX-NEXT:    sete %al
-; SKX-NEXT:    retq
+; AVX-LABEL: allzeros_v2i64_and1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %tmp = and <2 x i64> %arg, <i64 1, i64 1>
   %tmp1 = icmp ne <2 x i64> %tmp, zeroinitializer
   %tmp2 = bitcast <2 x i1> %tmp1 to i2
@@ -2134,13 +2103,20 @@ define i1 @allzeros_v4i64_and1(<4 x i64> %arg) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: allzeros_v4i64_and1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; KNL-LABEL: allzeros_v4i64_and1:
+; KNL:       # %bb.0:
+; KNL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; KNL-NEXT:    vptest %ymm1, %ymm0
+; KNL-NEXT:    sete %al
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: allzeros_v4i64_and1:
+; SKX:       # %bb.0:
+; SKX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; SKX-NEXT:    sete %al
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
   %tmp = and <4 x i64> %arg, <i64 1, i64 1, i64 1, i64 1>
   %tmp1 = icmp ne <4 x i64> %tmp, zeroinitializer
   %tmp2 = bitcast <4 x i1> %tmp1 to i4
@@ -2255,8 +2231,7 @@ define i1 @allzeros_v8i64_and1(<8 x i64> %arg) {
 ;
 ; AVX512-LABEL: allzeros_v8i64_and1:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1]
-; AVX512-NEXT:    vptestmd %zmm1, %zmm0, %k0
+; AVX512-NEXT:    vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0
 ; AVX512-NEXT:    kortestw %k0, %k0
 ; AVX512-NEXT:    sete %al
 ; AVX512-NEXT:    vzeroupper
@@ -2321,24 +2296,11 @@ define i1 @allzeros_v16i8_and4(<16 x i8> %arg) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: allzeros_v16i8_and4:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; KNL-LABEL: allzeros_v16i8_and4:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; KNL-NEXT:    sete %al
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: allzeros_v16i8_and4:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [289360691352306692,289360691352306692]
-; SKX-NEXT:    vptest %xmm1, %xmm0
-; SKX-NEXT:    sete %al
-; SKX-NEXT:    retq
+; AVX-LABEL: allzeros_v16i8_and4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %tmp = and <16 x i8> %arg, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
   %tmp1 = icmp ne <16 x i8> %tmp, zeroinitializer
   %tmp2 = bitcast <16 x i1> %tmp1 to i16
@@ -2431,13 +2393,20 @@ define i1 @allzeros_v32i8_and4(<32 x i8> %arg) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: allzeros_v32i8_and4:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [289360691352306692,289360691352306692,289360691352306692,289360691352306692]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; KNL-LABEL: allzeros_v32i8_and4:
+; KNL:       # %bb.0:
+; KNL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [289360691352306692,289360691352306692,289360691352306692,289360691352306692]
+; KNL-NEXT:    vptest %ymm1, %ymm0
+; KNL-NEXT:    sete %al
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: allzeros_v32i8_and4:
+; SKX:       # %bb.0:
+; SKX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; SKX-NEXT:    sete %al
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
   %tmp = and <32 x i8> %arg, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
   %tmp1 = icmp ne <32 x i8> %tmp, zeroinitializer
   %tmp2 = bitcast <32 x i1> %tmp1 to i32
@@ -2617,24 +2586,11 @@ define i1 @allzeros_v8i16_and4(<8 x i16> %arg) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: allzeros_v8i16_and4:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; KNL-LABEL: allzeros_v8i16_and4:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; KNL-NEXT:    sete %al
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: allzeros_v8i16_and4:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [1125917086973956,1125917086973956]
-; SKX-NEXT:    vptest %xmm1, %xmm0
-; SKX-NEXT:    sete %al
-; SKX-NEXT:    retq
+; AVX-LABEL: allzeros_v8i16_and4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %tmp = and <8 x i16> %arg, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
   %tmp1 = icmp ne <8 x i16> %tmp, zeroinitializer
   %tmp2 = bitcast <8 x i1> %tmp1 to i8
@@ -2857,13 +2813,20 @@ define i1 @allzeros_v16i16_and4(<16 x i16> %arg) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: allzeros_v16i16_and4:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1125917086973956,1125917086973956,1125917086973956,1125917086973956]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; KNL-LABEL: allzeros_v16i16_and4:
+; KNL:       # %bb.0:
+; KNL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1125917086973956,1125917086973956,1125917086973956,1125917086973956]
+; KNL-NEXT:    vptest %ymm1, %ymm0
+; KNL-NEXT:    sete %al
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: allzeros_v16i16_and4:
+; SKX:       # %bb.0:
+; SKX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; SKX-NEXT:    sete %al
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
   %tmp = and <16 x i16> %arg, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
   %tmp1 = icmp ne <16 x i16> %tmp, zeroinitializer
   %tmp2 = bitcast <16 x i1> %tmp1 to i16
@@ -2927,24 +2890,11 @@ define i1 @allzeros_v4i32_and4(<4 x i32> %arg) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: allzeros_v4i32_and4:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; KNL-LABEL: allzeros_v4i32_and4:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; KNL-NEXT:    sete %al
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: allzeros_v4i32_and4:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [17179869188,17179869188]
-; SKX-NEXT:    vptest %xmm1, %xmm0
-; SKX-NEXT:    sete %al
-; SKX-NEXT:    retq
+; AVX-LABEL: allzeros_v4i32_and4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %tmp = and <4 x i32> %arg, <i32 4, i32 4, i32 4, i32 4>
   %tmp1 = icmp ne <4 x i32> %tmp, zeroinitializer
   %tmp2 = bitcast <4 x i1> %tmp1 to i4
@@ -3040,13 +2990,20 @@ define i1 @allzeros_v8i32_and4(<8 x i32> %arg) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: allzeros_v8i32_and4:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [17179869188,17179869188,17179869188,17179869188]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; KNL-LABEL: allzeros_v8i32_and4:
+; KNL:       # %bb.0:
+; KNL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [17179869188,17179869188,17179869188,17179869188]
+; KNL-NEXT:    vptest %ymm1, %ymm0
+; KNL-NEXT:    sete %al
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: allzeros_v8i32_and4:
+; SKX:       # %bb.0:
+; SKX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; SKX-NEXT:    sete %al
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
   %tmp = and <8 x i32> %arg, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
   %tmp1 = icmp ne <8 x i32> %tmp, zeroinitializer
   %tmp2 = bitcast <8 x i1> %tmp1 to i8
@@ -3222,24 +3179,11 @@ define i1 @allzeros_v2i64_and4(<2 x i64> %arg) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: allzeros_v2i64_and4:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; KNL-LABEL: allzeros_v2i64_and4:
-; KNL:       # %bb.0:
-; KNL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; KNL-NEXT:    sete %al
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: allzeros_v2i64_and4:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [4,4]
-; SKX-NEXT:    vptest %xmm1, %xmm0
-; SKX-NEXT:    sete %al
-; SKX-NEXT:    retq
+; AVX-LABEL: allzeros_v2i64_and4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %tmp = and <2 x i64> %arg, <i64 4, i64 4>
   %tmp1 = icmp ne <2 x i64> %tmp, zeroinitializer
   %tmp2 = bitcast <2 x i1> %tmp1 to i2
@@ -3335,13 +3279,20 @@ define i1 @allzeros_v4i64_and4(<4 x i64> %arg) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: allzeros_v4i64_and4:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4,4,4,4]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; KNL-LABEL: allzeros_v4i64_and4:
+; KNL:       # %bb.0:
+; KNL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4,4,4,4]
+; KNL-NEXT:    vptest %ymm1, %ymm0
+; KNL-NEXT:    sete %al
+; KNL-NEXT:    vzeroupper
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: allzeros_v4i64_and4:
+; SKX:       # %bb.0:
+; SKX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; SKX-NEXT:    sete %al
+; SKX-NEXT:    vzeroupper
+; SKX-NEXT:    retq
   %tmp = and <4 x i64> %arg, <i64 4, i64 4, i64 4, i64 4>
   %tmp1 = icmp ne <4 x i64> %tmp, zeroinitializer
   %tmp2 = bitcast <4 x i1> %tmp1 to i4
@@ -3456,8 +3407,7 @@ define i1 @allzeros_v8i64_and4(<8 x i64> %arg) {
 ;
 ; AVX512-LABEL: allzeros_v8i64_and4:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4]
-; AVX512-NEXT:    vptestmd %zmm1, %zmm0, %k0
+; AVX512-NEXT:    vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0
 ; AVX512-NEXT:    kortestw %k0, %k0
 ; AVX512-NEXT:    sete %al
 ; AVX512-NEXT:    vzeroupper
diff --git a/llvm/test/CodeGen/X86/shuffle-half.ll b/llvm/test/CodeGen/X86/shuffle-half.ll
index 0d27fc3896766..530280d916f1f 100644
--- a/llvm/test/CodeGen/X86/shuffle-half.ll
+++ b/llvm/test/CodeGen/X86/shuffle-half.ll
@@ -324,8 +324,7 @@ define <32 x half> @build_vec(ptr %p, <32 x i1> %mask) {
 ; CHECK-NEXT:    jne .LBB1_4
 ; CHECK-NEXT:    jmp .LBB1_5
 ; CHECK-NEXT:  .LBB1_1:
-; CHECK-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
-; CHECK-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; CHECK-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
 ; CHECK-NEXT:    testb $2, %al
 ; CHECK-NEXT:    je .LBB1_5
 ; CHECK-NEXT:  .LBB1_4: # %cond.load1
diff --git a/llvm/test/CodeGen/X86/sse2.ll b/llvm/test/CodeGen/X86/sse2.ll
index 231f274db83cd..65e441fd0d2fb 100644
--- a/llvm/test/CodeGen/X86/sse2.ll
+++ b/llvm/test/CodeGen/X86/sse2.ll
@@ -401,26 +401,15 @@ define void @test12() nounwind {
 ; SSE-NEXT:    movaps %xmm2, 0
 ; SSE-NEXT:    ret{{[l|q]}}
 ;
-; AVX1-LABEL: test12:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovaps 0, %xmm0
-; AVX1-NEXT:    vblendps {{.*#+}} xmm1 = xmm0[0,1],mem[2,3]
-; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
-; AVX1-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vmovaps %xmm0, 0
-; AVX1-NEXT:    ret{{[l|q]}}
-;
-; AVX512-LABEL: test12:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovaps 0, %xmm0
-; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
-; AVX512-NEXT:    vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
-; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
-; AVX512-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; AVX512-NEXT:    vmovaps %xmm0, 0
-; AVX512-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: test12:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps 0, %xmm0
+; AVX-NEXT:    vblendps {{.*#+}} xmm1 = xmm0[0,1],mem[2,3]
+; AVX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, 0
+; AVX-NEXT:    ret{{[l|q]}}
   %tmp1 = load <4 x float>, ptr null          ; <<4 x float>> [#uses=2]
   %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >             ; <<4 x float>> [#uses=1]
   %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 >                ; <<4 x float>> [#uses=1]
@@ -701,6 +690,8 @@ define <4 x i32> @test_mul(<4 x i32> %x, <4 x i32> %y) {
   ret <4 x i32> %m
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX1: {{.*}}
+; AVX512: {{.*}}
 ; X64-AVX1: {{.*}}
 ; X64-AVX512: {{.*}}
 ; X86-AVX1: {{.*}}
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
index 7c788d291a5c7..d2caeda248356 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
@@ -910,14 +910,14 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
 ;
 ; AVX512VL-32-LABEL: uitofp_v2i1_v2f64:
 ; AVX512VL-32:       # %bb.0:
-; AVX512VL-32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512VL-32-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AVX512VL-32-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
 ; AVX512VL-32-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; AVX512VL-32-NEXT:    retl
 ;
 ; AVX512VL-64-LABEL: uitofp_v2i1_v2f64:
 ; AVX512VL-64:       # %bb.0:
-; AVX512VL-64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512VL-64-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; AVX512VL-64-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512VL-64-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; AVX512VL-64-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vec_fabs.ll b/llvm/test/CodeGen/X86/vec_fabs.ll
index 0fb26ff42d6ce..902304126a4ca 100644
--- a/llvm/test/CodeGen/X86/vec_fabs.ll
+++ b/llvm/test/CodeGen/X86/vec_fabs.ll
@@ -160,12 +160,26 @@ define <8 x half> @fabs_v8f16(ptr %p) nounwind {
 ; X86-AVX2-NEXT:    vpand (%eax), %xmm0, %xmm0
 ; X86-AVX2-NEXT:    retl
 ;
-; X86-AVX512-LABEL: fabs_v8f16:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X86-AVX512-NEXT:    vpand (%eax), %xmm0, %xmm0
-; X86-AVX512-NEXT:    retl
+; X86-AVX512VL-LABEL: fabs_v8f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    vmovaps (%eax), %xmm0
+; X86-AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fabs_v8f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    vmovaps (%eax), %xmm0
+; X86-AVX512FP16-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fabs_v8f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    vmovaps (%eax), %xmm0
+; X86-AVX512VLDQ-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fabs_v8f16:
 ; X64-SSE:       # %bb.0:
@@ -185,11 +199,23 @@ define <8 x half> @fabs_v8f16(ptr %p) nounwind {
 ; X64-AVX2-NEXT:    vpand (%rdi), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    retq
 ;
-; X64-AVX512-LABEL: fabs_v8f16:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X64-AVX512-NEXT:    vpand (%rdi), %xmm0, %xmm0
-; X64-AVX512-NEXT:    retq
+; X64-AVX512VL-LABEL: fabs_v8f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fabs_v8f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX512FP16-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fabs_v8f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX512VLDQ-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    retq
   %v = load <8 x half>, ptr %p, align 16
   %nnv = call <8 x half> @llvm.fabs.v8f16(<8 x half> %v)
   ret <8 x half> %nnv
@@ -366,12 +392,26 @@ define <16 x half> @fabs_v16f16(ptr %p) nounwind {
 ; X86-AVX2-NEXT:    vpand (%eax), %ymm0, %ymm0
 ; X86-AVX2-NEXT:    retl
 ;
-; X86-AVX512-LABEL: fabs_v16f16:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X86-AVX512-NEXT:    vpand (%eax), %ymm0, %ymm0
-; X86-AVX512-NEXT:    retl
+; X86-AVX512VL-LABEL: fabs_v16f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    vmovaps (%eax), %ymm0
+; X86-AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fabs_v16f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    vmovaps (%eax), %ymm0
+; X86-AVX512FP16-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fabs_v16f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    vmovaps (%eax), %ymm0
+; X86-AVX512VLDQ-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fabs_v16f16:
 ; X64-SSE:       # %bb.0:
@@ -393,11 +433,23 @@ define <16 x half> @fabs_v16f16(ptr %p) nounwind {
 ; X64-AVX2-NEXT:    vpand (%rdi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    retq
 ;
-; X64-AVX512-LABEL: fabs_v16f16:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X64-AVX512-NEXT:    vpand (%rdi), %ymm0, %ymm0
-; X64-AVX512-NEXT:    retq
+; X64-AVX512VL-LABEL: fabs_v16f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovaps (%rdi), %ymm0
+; X64-AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fabs_v16f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vmovaps (%rdi), %ymm0
+; X64-AVX512FP16-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fabs_v16f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vmovaps (%rdi), %ymm0
+; X64-AVX512VLDQ-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512VLDQ-NEXT:    retq
   %v = load <16 x half>, ptr %p, align 32
   %nnv = call <16 x half> @llvm.fabs.v16f16(<16 x half> %v)
   ret <16 x half> %nnv
@@ -587,24 +639,22 @@ define <32 x half> @fabs_v32f16(ptr %p) nounwind {
 ; X86-AVX512VL-LABEL: fabs_v32f16:
 ; X86-AVX512VL:       # %bb.0:
 ; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512VL-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; X86-AVX512VL-NEXT:    vpandq (%eax), %zmm0, %zmm0
+; X86-AVX512VL-NEXT:    vmovdqa64 (%eax), %zmm0
+; X86-AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-AVX512VL-NEXT:    retl
 ;
 ; X86-AVX512FP16-LABEL: fabs_v32f16:
 ; X86-AVX512FP16:       # %bb.0:
 ; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X86-AVX512FP16-NEXT:    vpandq (%eax), %zmm0, %zmm0
+; X86-AVX512FP16-NEXT:    vmovaps (%eax), %zmm0
+; X86-AVX512FP16-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-AVX512FP16-NEXT:    retl
 ;
 ; X86-AVX512VLDQ-LABEL: fabs_v32f16:
 ; X86-AVX512VLDQ:       # %bb.0:
 ; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512VLDQ-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; X86-AVX512VLDQ-NEXT:    vpandq (%eax), %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT:    vmovaps (%eax), %zmm0
+; X86-AVX512VLDQ-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fabs_v32f16:
@@ -635,22 +685,20 @@ define <32 x half> @fabs_v32f16(ptr %p) nounwind {
 ;
 ; X64-AVX512VL-LABEL: fabs_v32f16:
 ; X64-AVX512VL:       # %bb.0:
-; X64-AVX512VL-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; X64-AVX512VL-NEXT:    vpandq (%rdi), %zmm0, %zmm0
+; X64-AVX512VL-NEXT:    vmovdqa64 (%rdi), %zmm0
+; X64-AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-AVX512VL-NEXT:    retq
 ;
 ; X64-AVX512FP16-LABEL: fabs_v32f16:
 ; X64-AVX512FP16:       # %bb.0:
-; X64-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X64-AVX512FP16-NEXT:    vpandq (%rdi), %zmm0, %zmm0
+; X64-AVX512FP16-NEXT:    vmovaps (%rdi), %zmm0
+; X64-AVX512FP16-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-AVX512FP16-NEXT:    retq
 ;
 ; X64-AVX512VLDQ-LABEL: fabs_v32f16:
 ; X64-AVX512VLDQ:       # %bb.0:
-; X64-AVX512VLDQ-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; X64-AVX512VLDQ-NEXT:    vpandq (%rdi), %zmm0, %zmm0
+; X64-AVX512VLDQ-NEXT:    vmovaps (%rdi), %zmm0
+; X64-AVX512VLDQ-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-AVX512VLDQ-NEXT:    retq
   %v = load <32 x half>, ptr %p, align 64
   %nnv = call <32 x half> @llvm.fabs.v32f16(<32 x half> %v)
@@ -766,3 +814,6 @@ define void @PR70947(ptr %src, ptr %dst) nounwind {
   store <2 x double> %fabs4, ptr %dst4, align 4
   ret void
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64-AVX512: {{.*}}
+; X86-AVX512: {{.*}}
diff --git a/llvm/test/CodeGen/X86/vec_fcopysign.ll b/llvm/test/CodeGen/X86/vec_fcopysign.ll
index b34b02c90796b..b540a79f9e756 100644
--- a/llvm/test/CodeGen/X86/vec_fcopysign.ll
+++ b/llvm/test/CodeGen/X86/vec_fcopysign.ll
@@ -154,14 +154,32 @@ define <8 x half> @fcopysign_v8f16(ptr %p0, ptr %p1) nounwind {
 ; X86-AVX2-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; X86-AVX2-NEXT:    retl
 ;
-; X86-AVX512-LABEL: fcopysign_v8f16:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vmovdqa (%ecx), %xmm1
-; X86-AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2147450879,2147450879,2147450879,2147450879]
-; X86-AVX512-NEXT:    vpternlogd $202, (%eax), %xmm1, %xmm0
-; X86-AVX512-NEXT:    retl
+; X86-AVX512VL-LABEL: fcopysign_v8f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512VL-NEXT:    vmovdqa (%ecx), %xmm1
+; X86-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2147450879,2147450879,2147450879,2147450879]
+; X86-AVX512VL-NEXT:    vpternlogd $202, (%eax), %xmm1, %xmm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fcopysign_v8f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512FP16-NEXT:    vmovdqa (%ecx), %xmm1
+; X86-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [32767,32767,32767,32767,32767,32767,32767,32767]
+; X86-AVX512FP16-NEXT:    vpternlogd $202, (%eax), %xmm1, %xmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fcopysign_v8f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovdqa (%ecx), %xmm1
+; X86-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2147450879,2147450879,2147450879,2147450879]
+; X86-AVX512VLDQ-NEXT:    vpternlogd $202, (%eax), %xmm1, %xmm0
+; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fcopysign_v8f16:
 ; X64-SSE:       # %bb.0:
@@ -190,12 +208,26 @@ define <8 x half> @fcopysign_v8f16(ptr %p0, ptr %p1) nounwind {
 ; X64-AVX2-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; X64-AVX2-NEXT:    retq
 ;
-; X64-AVX512-LABEL: fcopysign_v8f16:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vmovdqa (%rdi), %xmm1
-; X64-AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2147450879,2147450879,2147450879,2147450879]
-; X64-AVX512-NEXT:    vpternlogd $202, (%rsi), %xmm1, %xmm0
-; X64-AVX512-NEXT:    retq
+; X64-AVX512VL-LABEL: fcopysign_v8f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2147450879,2147450879,2147450879,2147450879]
+; X64-AVX512VL-NEXT:    vpternlogd $202, (%rsi), %xmm1, %xmm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fcopysign_v8f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [32767,32767,32767,32767,32767,32767,32767,32767]
+; X64-AVX512FP16-NEXT:    vpternlogd $202, (%rsi), %xmm1, %xmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fcopysign_v8f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2147450879,2147450879,2147450879,2147450879]
+; X64-AVX512VLDQ-NEXT:    vpternlogd $202, (%rsi), %xmm1, %xmm0
+; X64-AVX512VLDQ-NEXT:    retq
   %a0 = load <8 x half>, ptr %p0, align 16
   %a1 = load <8 x half>, ptr %p1, align 16
   %t = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a0, <8 x half> %a1)
@@ -400,14 +432,32 @@ define <16 x half> @fcopysign_v16f16(ptr %p0, ptr %p1) nounwind {
 ; X86-AVX2-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; X86-AVX2-NEXT:    retl
 ;
-; X86-AVX512-LABEL: fcopysign_v16f16:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vmovdqu (%ecx), %ymm1
-; X86-AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
-; X86-AVX512-NEXT:    vpternlogd $202, (%eax), %ymm1, %ymm0
-; X86-AVX512-NEXT:    retl
+; X86-AVX512VL-LABEL: fcopysign_v16f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512VL-NEXT:    vmovdqu (%ecx), %ymm1
+; X86-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
+; X86-AVX512VL-NEXT:    vpternlogd $202, (%eax), %ymm1, %ymm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fcopysign_v16f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512FP16-NEXT:    vmovdqu (%ecx), %ymm1
+; X86-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767]
+; X86-AVX512FP16-NEXT:    vpternlogd $202, (%eax), %ymm1, %ymm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fcopysign_v16f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovdqu (%ecx), %ymm1
+; X86-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
+; X86-AVX512VLDQ-NEXT:    vpternlogd $202, (%eax), %ymm1, %ymm0
+; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fcopysign_v16f16:
 ; X64-SSE:       # %bb.0:
@@ -441,12 +491,26 @@ define <16 x half> @fcopysign_v16f16(ptr %p0, ptr %p1) nounwind {
 ; X64-AVX2-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; X64-AVX2-NEXT:    retq
 ;
-; X64-AVX512-LABEL: fcopysign_v16f16:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vmovdqu (%rdi), %ymm1
-; X64-AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
-; X64-AVX512-NEXT:    vpternlogd $202, (%rsi), %ymm1, %ymm0
-; X64-AVX512-NEXT:    retq
+; X64-AVX512VL-LABEL: fcopysign_v16f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqu (%rdi), %ymm1
+; X64-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
+; X64-AVX512VL-NEXT:    vpternlogd $202, (%rsi), %ymm1, %ymm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fcopysign_v16f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vmovdqu (%rdi), %ymm1
+; X64-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767]
+; X64-AVX512FP16-NEXT:    vpternlogd $202, (%rsi), %ymm1, %ymm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fcopysign_v16f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vmovdqu (%rdi), %ymm1
+; X64-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
+; X64-AVX512VLDQ-NEXT:    vpternlogd $202, (%rsi), %ymm1, %ymm0
+; X64-AVX512VLDQ-NEXT:    retq
   %a0 = load <16 x half>, ptr %p0, align 16
   %a1 = load <16 x half>, ptr %p1, align 16
   %t = call <16 x half> @llvm.copysign.v16f16(<16 x half> %a0, <16 x half> %a1)
@@ -691,14 +755,32 @@ define <32 x half> @fcopysign_v32f16(ptr %p0, ptr %p1) nounwind {
 ; X86-AVX2-NEXT:    vpor %ymm2, %ymm1, %ymm1
 ; X86-AVX2-NEXT:    retl
 ;
-; X86-AVX512-LABEL: fcopysign_v32f16:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vmovdqu64 (%ecx), %zmm1
-; X86-AVX512-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
-; X86-AVX512-NEXT:    vpternlogd $202, (%eax), %zmm1, %zmm0
-; X86-AVX512-NEXT:    retl
+; X86-AVX512VL-LABEL: fcopysign_v32f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512VL-NEXT:    vmovdqu64 (%ecx), %zmm1
+; X86-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
+; X86-AVX512VL-NEXT:    vpternlogd $202, (%eax), %zmm1, %zmm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fcopysign_v32f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512FP16-NEXT:    vmovdqu64 (%ecx), %zmm1
+; X86-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767]
+; X86-AVX512FP16-NEXT:    vpternlogd $202, (%eax), %zmm1, %zmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fcopysign_v32f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovdqu64 (%ecx), %zmm1
+; X86-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
+; X86-AVX512VLDQ-NEXT:    vpternlogd $202, (%eax), %zmm1, %zmm0
+; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fcopysign_v32f16:
 ; X64-SSE:       # %bb.0:
@@ -746,12 +828,26 @@ define <32 x half> @fcopysign_v32f16(ptr %p0, ptr %p1) nounwind {
 ; X64-AVX2-NEXT:    vpor %ymm2, %ymm1, %ymm1
 ; X64-AVX2-NEXT:    retq
 ;
-; X64-AVX512-LABEL: fcopysign_v32f16:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vmovdqu64 (%rdi), %zmm1
-; X64-AVX512-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
-; X64-AVX512-NEXT:    vpternlogd $202, (%rsi), %zmm1, %zmm0
-; X64-AVX512-NEXT:    retq
+; X64-AVX512VL-LABEL: fcopysign_v32f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqu64 (%rdi), %zmm1
+; X64-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
+; X64-AVX512VL-NEXT:    vpternlogd $202, (%rsi), %zmm1, %zmm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fcopysign_v32f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vmovdqu64 (%rdi), %zmm1
+; X64-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767]
+; X64-AVX512FP16-NEXT:    vpternlogd $202, (%rsi), %zmm1, %zmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fcopysign_v32f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vmovdqu64 (%rdi), %zmm1
+; X64-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879,2147450879]
+; X64-AVX512VLDQ-NEXT:    vpternlogd $202, (%rsi), %zmm1, %zmm0
+; X64-AVX512VLDQ-NEXT:    retq
   %a0 = load <32 x half>, ptr %p0, align 16
   %a1 = load <32 x half>, ptr %p1, align 16
   %t = call <32 x half> @llvm.copysign.v32f16(<32 x half> %a0, <32 x half> %a1)
@@ -761,11 +857,5 @@ declare <32 x half> @llvm.copysign.v32f16(<32 x half>, <32 x half>)
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; X64: {{.*}}
 ; X64-AVX: {{.*}}
-; X64-AVX512FP16: {{.*}}
-; X64-AVX512VL: {{.*}}
-; X64-AVX512VLDQ: {{.*}}
 ; X86: {{.*}}
 ; X86-AVX: {{.*}}
-; X86-AVX512FP16: {{.*}}
-; X86-AVX512VL: {{.*}}
-; X86-AVX512VLDQ: {{.*}}
diff --git a/llvm/test/CodeGen/X86/vec_fneg.ll b/llvm/test/CodeGen/X86/vec_fneg.ll
index b14da0a2c2712..0e8b053553e67 100644
--- a/llvm/test/CodeGen/X86/vec_fneg.ll
+++ b/llvm/test/CodeGen/X86/vec_fneg.ll
@@ -158,12 +158,26 @@ define <8 x half> @fneg_v8f16(ptr %p) nounwind {
 ; X86-AVX2-NEXT:    vpxor (%eax), %xmm0, %xmm0
 ; X86-AVX2-NEXT:    retl
 ;
-; X86-AVX512-LABEL: fneg_v8f16:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-AVX512-NEXT:    vpxor (%eax), %xmm0, %xmm0
-; X86-AVX512-NEXT:    retl
+; X86-AVX512VL-LABEL: fneg_v8f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    vmovaps (%eax), %xmm0
+; X86-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v8f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    vmovaps (%eax), %xmm0
+; X86-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v8f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    vmovaps (%eax), %xmm0
+; X86-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fneg_v8f16:
 ; X64-SSE:       # %bb.0:
@@ -183,11 +197,23 @@ define <8 x half> @fneg_v8f16(ptr %p) nounwind {
 ; X64-AVX2-NEXT:    vpxor (%rdi), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    retq
 ;
-; X64-AVX512-LABEL: fneg_v8f16:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X64-AVX512-NEXT:    vpxor (%rdi), %xmm0, %xmm0
-; X64-AVX512-NEXT:    retq
+; X64-AVX512VL-LABEL: fneg_v8f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v8f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v8f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    retq
   %v = load <8 x half>, ptr %p, align 16
   %nnv = fsub <8 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %v
   ret <8 x half> %nnv
@@ -361,12 +387,26 @@ define <16 x half> @fneg_v16f16(ptr %p) nounwind {
 ; X86-AVX2-NEXT:    vpxor (%eax), %ymm0, %ymm0
 ; X86-AVX2-NEXT:    retl
 ;
-; X86-AVX512-LABEL: fneg_v16f16:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-AVX512-NEXT:    vpxor (%eax), %ymm0, %ymm0
-; X86-AVX512-NEXT:    retl
+; X86-AVX512VL-LABEL: fneg_v16f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    vmovups (%eax), %ymm0
+; X86-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v16f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    vmovups (%eax), %ymm0
+; X86-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v16f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    vmovups (%eax), %ymm0
+; X86-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fneg_v16f16:
 ; X64-SSE:       # %bb.0:
@@ -388,11 +428,23 @@ define <16 x half> @fneg_v16f16(ptr %p) nounwind {
 ; X64-AVX2-NEXT:    vpxor (%rdi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    retq
 ;
-; X64-AVX512-LABEL: fneg_v16f16:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X64-AVX512-NEXT:    vpxor (%rdi), %ymm0, %ymm0
-; X64-AVX512-NEXT:    retq
+; X64-AVX512VL-LABEL: fneg_v16f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovups (%rdi), %ymm0
+; X64-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v16f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vmovups (%rdi), %ymm0
+; X64-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v16f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vmovups (%rdi), %ymm0
+; X64-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512VLDQ-NEXT:    retq
   %v = load <16 x half>, ptr %p, align 16
   %nnv = fsub <16 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %v
   ret <16 x half> %nnv
@@ -579,24 +631,22 @@ define <32 x half> @fneg_v32f16(ptr %p) nounwind {
 ; X86-AVX512VL-LABEL: fneg_v32f16:
 ; X86-AVX512VL:       # %bb.0:
 ; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512VL-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; X86-AVX512VL-NEXT:    vpxorq (%eax), %zmm0, %zmm0
+; X86-AVX512VL-NEXT:    vmovdqu64 (%eax), %zmm0
+; X86-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-AVX512VL-NEXT:    retl
 ;
 ; X86-AVX512FP16-LABEL: fneg_v32f16:
 ; X86-AVX512FP16:       # %bb.0:
 ; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-AVX512FP16-NEXT:    vpxorq (%eax), %zmm0, %zmm0
+; X86-AVX512FP16-NEXT:    vmovups (%eax), %zmm0
+; X86-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-AVX512FP16-NEXT:    retl
 ;
 ; X86-AVX512VLDQ-LABEL: fneg_v32f16:
 ; X86-AVX512VLDQ:       # %bb.0:
 ; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512VLDQ-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; X86-AVX512VLDQ-NEXT:    vpxorq (%eax), %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT:    vmovups (%eax), %zmm0
+; X86-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-SSE-LABEL: fneg_v32f16:
@@ -627,22 +677,20 @@ define <32 x half> @fneg_v32f16(ptr %p) nounwind {
 ;
 ; X64-AVX512VL-LABEL: fneg_v32f16:
 ; X64-AVX512VL:       # %bb.0:
-; X64-AVX512VL-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; X64-AVX512VL-NEXT:    vpxorq (%rdi), %zmm0, %zmm0
+; X64-AVX512VL-NEXT:    vmovdqu64 (%rdi), %zmm0
+; X64-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-AVX512VL-NEXT:    retq
 ;
 ; X64-AVX512FP16-LABEL: fneg_v32f16:
 ; X64-AVX512FP16:       # %bb.0:
-; X64-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X64-AVX512FP16-NEXT:    vpxorq (%rdi), %zmm0, %zmm0
+; X64-AVX512FP16-NEXT:    vmovups (%rdi), %zmm0
+; X64-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-AVX512FP16-NEXT:    retq
 ;
 ; X64-AVX512VLDQ-LABEL: fneg_v32f16:
 ; X64-AVX512VLDQ:       # %bb.0:
-; X64-AVX512VLDQ-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; X64-AVX512VLDQ-NEXT:    vpxorq (%rdi), %zmm0, %zmm0
+; X64-AVX512VLDQ-NEXT:    vmovups (%rdi), %zmm0
+; X64-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-AVX512VLDQ-NEXT:    retq
   %v = load <32 x half>, ptr %p, align 16
   %nnv = fsub <32 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %v
@@ -650,4 +698,6 @@ define <32 x half> @fneg_v32f16(ptr %p) nounwind {
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; X64: {{.*}}
+; X64-AVX512: {{.*}}
 ; X86: {{.*}}
+; X86-AVX512: {{.*}}
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index 05be4e1ee928e..35df12c28b345 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -1040,7 +1040,7 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
 ;
 ; AVX512VL-LABEL: splatvar_funnnel_v16i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpandn %xmm3, %xmm2, %xmm4
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
@@ -1071,7 +1071,7 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
 ;
 ; AVX512VLBW-LABEL: splatvar_funnnel_v16i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpandn %xmm3, %xmm2, %xmm4
 ; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VLBW-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
diff --git a/llvm/test/CodeGen/X86/vector-fshl-512.ll b/llvm/test/CodeGen/X86/vector-fshl-512.ll
index 0724d879f557d..bddb7006f751d 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-512.ll
@@ -562,7 +562,7 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i
 ;
 ; AVX512VL-LABEL: splatvar_funnnel_v32i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpandn %xmm3, %xmm2, %xmm4
 ; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm5
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm5, %ymm5
@@ -597,7 +597,7 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i
 ;
 ; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpandn %xmm3, %xmm2, %xmm4
 ; AVX512VLBW-NEXT:    vpsrlw $1, %zmm1, %zmm1
 ; AVX512VLBW-NEXT:    vpsrlw %xmm4, %zmm1, %zmm1
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
index 58719e6bd8e0c..14b16e5b4d538 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -796,7 +796,7 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %amt) nounw
 ;
 ; AVX512VL-LABEL: splatvar_funnnel_v16i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpandn %xmm2, %xmm1, %xmm3
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpsrlw %xmm3, %ymm4, %ymm3
@@ -818,7 +818,7 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %amt) nounw
 ;
 ; AVX512VLBW-LABEL: splatvar_funnnel_v16i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpandn %xmm2, %xmm1, %xmm3
 ; AVX512VLBW-NEXT:    vpsrlw $1, %ymm0, %ymm4
 ; AVX512VLBW-NEXT:    vpsrlw %xmm3, %ymm4, %ymm3
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
index edfa56a70d59e..0eb590b5a26d2 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
@@ -317,7 +317,7 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
 ;
 ; AVX512VL-LABEL: splatvar_funnnel_v32i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpandn %xmm2, %xmm1, %xmm3
 ; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm4, %ymm5
@@ -345,7 +345,7 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
 ;
 ; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpandn %xmm2, %xmm1, %xmm3
 ; AVX512VLBW-NEXT:    vpsrlw $1, %zmm0, %zmm4
 ; AVX512VLBW-NEXT:    vpsrlw %xmm3, %zmm4, %zmm3
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
index 1a6ecea596563..edfdafc028124 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -1072,7 +1072,7 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
 ;
 ; AVX512VL-LABEL: splatvar_funnnel_v16i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
 ; AVX512VL-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpandn %xmm3, %xmm2, %xmm2
@@ -1103,7 +1103,7 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
 ;
 ; AVX512VLBW-LABEL: splatvar_funnnel_v16i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
 ; AVX512VLBW-NEXT:    vpsrlw %xmm4, %ymm1, %ymm1
 ; AVX512VLBW-NEXT:    vpandn %xmm3, %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-fshr-512.ll b/llvm/test/CodeGen/X86/vector-fshr-512.ll
index 1c6646152ab1f..5c988f9e35272 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-512.ll
@@ -564,7 +564,7 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i
 ;
 ; AVX512VL-LABEL: splatvar_funnnel_v32i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpand %xmm3, %xmm2, %xmm4
 ; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm5
 ; AVX512VL-NEXT:    vpsrlw %xmm4, %ymm5, %ymm5
@@ -600,7 +600,7 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y, <32 x i
 ;
 ; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpand %xmm3, %xmm2, %xmm4
 ; AVX512VLBW-NEXT:    vpsrlw %xmm4, %zmm1, %zmm1
 ; AVX512VLBW-NEXT:    vpandn %xmm3, %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
index bb311468ce913..fd3605d56b38c 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -835,7 +835,7 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %amt) nounw
 ;
 ; AVX512VL-LABEL: splatvar_funnnel_v16i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm3
 ; AVX512VL-NEXT:    vpsrlw %xmm3, %ymm0, %ymm3
 ; AVX512VL-NEXT:    vpandn %xmm2, %xmm1, %xmm1
@@ -857,7 +857,7 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %amt) nounw
 ;
 ; AVX512VLBW-LABEL: splatvar_funnnel_v16i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
 ; AVX512VLBW-NEXT:    vpsrlw %xmm3, %ymm0, %ymm3
 ; AVX512VLBW-NEXT:    vpandn %xmm2, %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
index 4364c047dfdeb..3c0d89f6e2786 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
@@ -315,7 +315,7 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
 ;
 ; AVX512VL-LABEL: splatvar_funnnel_v32i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpand %xmm2, %xmm1, %xmm3
 ; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
 ; AVX512VL-NEXT:    vpsrlw %xmm3, %ymm4, %ymm5
@@ -343,7 +343,7 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
 ;
 ; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpand %xmm2, %xmm1, %xmm3
 ; AVX512VLBW-NEXT:    vpsrlw %xmm3, %zmm0, %zmm3
 ; AVX512VLBW-NEXT:    vpandn %xmm2, %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
index 8eb2668760040..620f53412d450 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
@@ -2468,27 +2468,28 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm3
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60]
 ; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm4, %zmm5
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm4
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm5
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm4[4,5,6,7]
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61,1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61]
 ; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm5, %zmm6
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm5
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm6
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,6,10,14,18,22,26,30,34,38,42,46,50,54,58,62,2,6,10,14,18,22,26,30,34,38,42,46,50,54,58,62]
 ; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm6, %zmm7
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm6
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm7[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = [2,6,10,14,18,22,26,30,34,38,42,46,50,54,58,62]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm7
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm7[0,1,2,3],zmm6[4,5,6,7]
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [3,7,11,15,19,23,27,31,35,39,43,47,51,55,59,63,3,7,11,15,19,23,27,31,35,39,43,47,51,55,59,63]
 ; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm7, %zmm2
-; AVX512BW-NEXT:    vpermt2w %zmm1, %zmm7, %zmm0
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm7
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [3,7,11,15,19,23,27,31,35,39,43,47,51,55,59,63]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm7[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%rcx)
@@ -5350,63 +5351,64 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512BW-LABEL: load_i16_stride4_vf64:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm7
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512BW-NEXT:    vpermt2w %zmm6, %zmm8, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512BW-NEXT:    vpermt2w %zmm4, %zmm8, %zmm10
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm9[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm8, %zmm10
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm8
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm10[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm9
+; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm8, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm10, %zmm11
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm11[0,1,2,3],zmm9[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm3, %zmm8
+; AVX512BW-NEXT:    vpermi2w %zmm0, %zmm1, %zmm10
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm10[0,1,2,3],zmm8[4,5,6,7]
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61,1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61]
 ; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm11
-; AVX512BW-NEXT:    vpermt2w %zmm6, %zmm10, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm12
-; AVX512BW-NEXT:    vpermt2w %zmm4, %zmm10, %zmm12
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm12[0,1,2,3],zmm11[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm10, %zmm12
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm10
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm12[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
+; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm10, %zmm11
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm12 = [1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm13
+; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm12, %zmm13
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm13[0,1,2,3],zmm11[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm3, %zmm10
+; AVX512BW-NEXT:    vpermi2w %zmm0, %zmm1, %zmm12
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm12[0,1,2,3],zmm10[4,5,6,7]
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [2,6,10,14,18,22,26,30,34,38,42,46,50,54,58,62,2,6,10,14,18,22,26,30,34,38,42,46,50,54,58,62]
 ; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512BW-NEXT:    vpermt2w %zmm6, %zmm12, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm14
-; AVX512BW-NEXT:    vpermt2w %zmm4, %zmm12, %zmm14
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm14[0,1,2,3],zmm13[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm12, %zmm14
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm12
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm12[0,1,2,3],zmm14[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm13
+; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm12, %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm14 = [2,6,10,14,18,22,26,30,34,38,42,46,50,54,58,62]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm15
+; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm14, %zmm15
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm15[0,1,2,3],zmm13[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm3, %zmm12
+; AVX512BW-NEXT:    vpermi2w %zmm0, %zmm1, %zmm14
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm14[0,1,2,3],zmm12[4,5,6,7]
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [3,7,11,15,19,23,27,31,35,39,43,47,51,55,59,63,3,7,11,15,19,23,27,31,35,39,43,47,51,55,59,63]
 ; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2w %zmm6, %zmm14, %zmm7
-; AVX512BW-NEXT:    vpermt2w %zmm4, %zmm14, %zmm5
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm7[4,5,6,7]
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm14, %zmm2
-; AVX512BW-NEXT:    vpermt2w %zmm1, %zmm14, %zmm0
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, 64(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 64(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 64(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 64(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%r8)
+; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm14, %zmm6
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = [3,7,11,15,19,23,27,31,35,39,43,47,51,55,59,63]
+; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm7, %zmm4
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm6[4,5,6,7]
+; AVX512BW-NEXT:    vpermt2w %zmm2, %zmm14, %zmm3
+; AVX512BW-NEXT:    vpermt2w %zmm0, %zmm7, %zmm1
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, 64(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, 64(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%r8)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <256 x i16>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index f5399e791f6e8..4c9297210a94d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -7385,11 +7385,11 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [4,5,14,15,8,9,2,3,12,13,6,7,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm5
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [1,4,6,0,1,4,6,0]
-; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm23, %ymm5, %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm14, %ymm14
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [1,4,6,0,1,4,6,0]
+; AVX512F-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm23, %ymm11, %ymm14
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm14, %ymm14
 ; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm21, %zmm14
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm14, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -7424,8 +7424,8 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm25, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm29, %ymm5, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vpermd %ymm29, %ymm11, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
 ; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm21, %zmm2
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index 893bff29b21e4..5a6f31379b6eb 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -13494,38 +13494,37 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride7_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $1768, %rsp # imm = 0x6E8
+; AVX512F-ONLY-FAST-NEXT:    subq $1736, %rsp # imm = 0x6C8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,5,9,u,12,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm1, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm23
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm5, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm22
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 480(%rdi), %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm21
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,6,7,12,13,2,3,16,17,30,31,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm4, %ymm6, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm4, %ymm6, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 672(%rdi), %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm17
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 700(%rdi), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm28 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm24[0,1,0,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm24[0,1,0,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm7
@@ -13535,209 +13534,211 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm13, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm14, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm20
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3,4,5],xmm3[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1],ymm11[2],ymm3[3,4,5],ymm11[6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5,6],ymm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0],xmm13[1],xmm15[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm15, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm14[0,1,2],ymm12[3],ymm14[4,5],ymm12[6],ymm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm9, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm8, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm13[2],ymm2[3,4,5],ymm13[6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm4[0,1,2,3],xmm7[4],xmm4[5],xmm7[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm10, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm8[0,1,2,3,4,5,6],ymm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm15[0],xmm14[1],xmm15[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm15, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm11, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm6[0,1,2],ymm15[3],ymm6[4,5],ymm15[6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3,4,5],xmm10[6],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm10, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm9, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5],xmm9[6],xmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm30[0,1,0,2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm28
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5,6],ymm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 688(%rdi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm4[0],xmm2[1],xmm4[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm5[0,1],ymm0[2],ymm5[3,4,5],ymm0[6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5],xmm10[6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm21[0,1,0,2]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5,6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 688(%rdi), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm3[0],xmm1[1],xmm3[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <2,6,9,u,13,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm17, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm5[0],xmm8[1],xmm5[2,3,4,5],xmm8[6],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm9, %ymm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0,1,2],ymm11[3],ymm3[4,5],ymm11[6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm19, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm20, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0],ymm12[1],ymm0[2,3],ymm12[4],ymm0[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1],xmm8[2,3,4,5],xmm9[6],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm7, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm10, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm10, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2],ymm13[3],ymm2[4,5],ymm13[6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2,3,4,5],xmm8[6],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm30
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [2,5,2,5,2,5,2,5]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm24, %ymm11, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm8[0,1,2,3,4,5,6],ymm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm13[0],xmm6[0],xmm13[1],xmm6[1],xmm13[2],xmm6[2],xmm13[3],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm13, %xmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0],ymm14[1],ymm15[2,3],ymm14[4],ymm15[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm15, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm13[1],xmm12[2,3,4,5],xmm13[6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm12, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm5, %ymm10, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3],ymm5[4,5],ymm7[6],ymm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0],xmm5[1],xmm7[2,3,4,5],xmm5[6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm30, %ymm11, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm31
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm7, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm5, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm19 = [2,5,2,5,2,5,2,5]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm24, %ymm19, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm8[0,1,2,3,4,5,6],ymm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm14[0],xmm4[0],xmm14[1],xmm4[1],xmm14[2],xmm4[2],xmm14[3],xmm4[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm13, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm5[1],xmm9[2],xmm5[3],xmm9[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0],ymm6[1],ymm15[2,3],ymm6[4],ymm15[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2,3,4,5],xmm14[6],xmm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm13, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm7, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm11, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3,4,5],xmm7[6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm30, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm21, %ymm19, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5,6],ymm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm19
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0],ymm12[1],ymm0[2,3,4],ymm12[5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2],xmm9[3],xmm10[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [10,3,6,15,12,13,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm27, %zmm1, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm10, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm10, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm27, %zmm1, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm11, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm9, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm11, %ymm9, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm10[1],xmm9[2,3,4,5],xmm10[6],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm9, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm11 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm11[1],xmm9[2,3,4,5],xmm11[6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm12 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm24[0,1,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm9, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm13[0,1,2,3,4,5,6],ymm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} xmm13 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm15, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm9, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm14[0,1,2,3,4,5,6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} xmm14 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0],ymm14[1],ymm3[2,3,4],ymm14[5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm15[0],xmm8[1],xmm15[2],xmm8[3],xmm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm1, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm12, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm5, %ymm8, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm15[0],ymm2[1],ymm15[2,3,4],ymm2[5],ymm15[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2],xmm6[3],xmm8[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm1, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm8, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2,3,4,5],xmm8[6],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm30[0,1,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm8, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm21[0,1,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm8, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0],ymm4[1],ymm6[2,3,4],ymm4[5],ymm6[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm9, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm20, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm3, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [3,6,10,13,3,6,10,13]
-; AVX512F-ONLY-FAST-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm23, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm9, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm17, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm3, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [3,6,10,13,3,6,10,13]
+; AVX512F-ONLY-FAST-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm20, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5,6],ymm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5,6],ymm4[7]
 ; AVX512F-ONLY-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm22 {%k1} # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm28, %zmm4, %zmm16 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
@@ -13746,206 +13747,200 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 680(%rdi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm18, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,3,3,3,0,3,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm24, %ymm2, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,3,3,3,0,3,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm24, %ymm11, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [2,5,9,12,2,5,9,12]
-; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm5, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [2,5,9,12,2,5,9,12]
+; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm2, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7],ymm3[8,9,10,11,12],ymm1[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm17, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm10[4],xmm3[5],xmm10[5],xmm3[6],xmm10[6],xmm3[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0,1,2,3,4],ymm1[5,6,7],ymm6[8,9,10,11,12],ymm1[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm13, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm10[4],xmm6[5],xmm10[5],xmm6[6],xmm10[6],xmm6[7],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm10, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 736(%rdi), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm3[2,3],ymm8[4,5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4],xmm1[5],xmm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 736(%rdi), %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1],ymm15[2,3],ymm14[4,5],ymm15[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0,1,2],xmm1[3],xmm6[4],xmm1[5],xmm6[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [8,9,6,7,4,5,10,11,8,9,6,7,4,5,10,11]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm19
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,u,u,u,4,7,11,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4,5,6],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm23, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0,1,2],xmm1[3,4,5,6],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm20, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3,4,5,6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm29 {%k1} # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5,6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm29 {%k1} # 16-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm30, %ymm2, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm5, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm31, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm21, %ymm11, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm2, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6,7],ymm0[8,9,10,11,12],ymm6[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm19, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [0,4,7,0,0,4,7,0]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm18 = [0,4,7,0,0,4,7,0]
+; AVX512F-ONLY-FAST-NEXT:    # ymm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm24, %ymm18, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,6,9,13,2,6,9,13]
 ; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm24, %ymm9, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [2,6,9,13,2,6,9,13]
-; AVX512F-ONLY-FAST-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm15, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm31 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm9, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm0[5,6,7],ymm6[8,9,10,11,12],ymm0[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm5, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm6[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0,1,2],xmm3[3],xmm12[4],xmm3[5],xmm12[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm16, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm12, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0,1,2],xmm3[3,4,5,6],xmm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm11, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm5[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5],xmm3[6],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm5 = [10,11,8,9,6,7,12,13,10,11,8,9,6,7,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm2, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm12[0,1,2],xmm1[3],xmm12[4],xmm1[5],xmm12[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm16, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm12, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0,1,2],xmm1[3,4,5,6],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm10, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1,2],ymm4[3],ymm6[4,5],ymm4[6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm4, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm4 = [10,11,8,9,6,7,12,13,10,11,8,9,6,7,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <1,u,u,u,4,8,11,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm16, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm16, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm12[0,1,2],xmm1[3,4,5,6],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm10, %zmm9, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0,1,2],xmm3[3,4,5,6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm11, %zmm15, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm12[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2],ymm0[3],ymm6[4,5],ymm0[6],ymm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0,1,2,3],xmm3[4],xmm12[5],xmm3[6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1,2],xmm3[3,4,5,6],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm12[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2],ymm15[3],ymm14[4,5],ymm15[6],ymm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm12[0,1,2,3],xmm1[4],xmm12[5],xmm1[6],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4,5,6],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm9, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1],ymm13[2],ymm10[3,4,5],ymm13[6],ymm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm17
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm13[2],ymm6[3,4,5],ymm13[6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm24
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,u,u,u,5,8,12,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm3, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm3, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm5, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm11, %zmm23, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm10, %zmm20, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm6[2],ymm0[3,4,5],ymm6[6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4],xmm1[5],xmm5[6],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm14[2],ymm15[3,4,5],ymm14[6],ymm15[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm3, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm23, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm20, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm30, %ymm9, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm15, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm21, %ymm18, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm9, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm1[2],ymm14[3,4,5],ymm1[6],ymm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm1[2],ymm13[3,4,5],ymm1[6],ymm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm13, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm13[3],ymm4[4,5],ymm13[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1,2],ymm4[3],ymm6[4,5],ymm4[6],ymm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,11,2,11,12,5,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm5, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm9, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm5, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm9, %ymm9
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5,6,7],ymm9[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm9, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 864(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 832(%rdi), %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm1[2],ymm13[3,4,5],ymm1[6],ymm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 832(%rdi), %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm1[2],ymm9[3,4,5],ymm1[6],ymm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm5, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm15, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm15[0,1,2],ymm14[3],ymm15[4,5],ymm14[6],ymm15[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3,4,5],xmm2[6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -13954,38 +13949,36 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1],ymm9[2],ymm12[3,4],ymm9[5],ymm12[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm10[2],ymm15[3,4],ymm10[5],ymm15[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,3,7,10,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm15, %zmm16, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm18, %zmm16, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm5, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm5[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm26, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm27, %zmm25
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2],ymm8[3],ymm14[4,5],ymm8[6],ymm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1,2],ymm7[3],ymm13[4,5],ymm7[6],ymm13[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3,4,5],xmm1[6],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0],ymm4[1],ymm11[2,3],ymm4[4],ymm11[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm25
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm11
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm11[1],xmm5[2,3,4,5],xmm11[6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <2,u,u,u,6,9,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm18, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <2,u,u,u,6,9,13,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm21, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
@@ -13995,29 +13988,25 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm16, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm16, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm26, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm6[3],ymm13[4,5],ymm6[6],ymm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm18, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm21, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm10[0],ymm7[1],ymm10[2,3],ymm7[4],ymm10[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm10, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm14[0],ymm6[1],ymm14[2,3],ymm6[4],ymm14[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3,4,5],xmm5[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -14025,201 +14014,202 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm9[2,3],ymm12[4,5],ymm9[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm9, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm10[2,3],ymm15[4,5],ymm10[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm22
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm8, %xmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm12, %xmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,4,7,11,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm15, %zmm16, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm18, %zmm16, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm24
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm2, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0],ymm14[1],ymm5[2,3],ymm14[4],ymm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0],ymm13[1],ymm7[2,3],ymm13[4],ymm7[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm20
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm12
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm12[1],xmm0[2,3,4,5],xmm12[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm8[0],ymm2[1],ymm8[2,3,4],ymm2[5],ymm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm5[0],ymm2[1],ymm5[2,3,4],ymm2[5],ymm5[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm15
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm15[0],xmm12[1],xmm15[2],xmm12[3],xmm15[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <3,u,u,u,6,10,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm10, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <3,u,u,u,6,10,13,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm18, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm12, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm25
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm23
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm26, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm17 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm16, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm16, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0],ymm9[1],ymm6[2,3],ymm9[4],ymm6[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm9, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm8, %ymm9
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm11
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm11[1],xmm0[2,3,4,5],xmm11[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm11[0],xmm4[1],xmm11[2],xmm4[3],xmm11[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm18, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm11, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0],ymm6[1],ymm14[2,3,4],ymm6[5],ymm14[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm11, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,4,8,11,15,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm3, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm11, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm24, %zmm3, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm11, %ymm11
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm11[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm5[0],ymm14[1],ymm5[2,3,4],ymm14[5],ymm5[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3],xmm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1],ymm2[2],ymm8[3,4],ymm2[5],ymm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm13[1],ymm4[2,3,4],ymm13[5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm5[0,1],ymm2[2],ymm5[3,4],ymm2[5],ymm5[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,3,1,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,10,3,14,7,10,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm2, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,1,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,10,3,14,7,10,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm5, %zmm13
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0],ymm8[1,2],ymm13[3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0],ymm6[1,2],ymm13[3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3,4,5,6,7],ymm8[8,9,10],ymm6[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1,2],ymm8[3],ymm5[4,5],ymm8[6],ymm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm15, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,6,4,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm3, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1],ymm3[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0],ymm9[1],ymm5[2,3,4],ymm9[5],ymm5[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2],xmm8[3],xmm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm5[0,1],ymm10[2],ymm5[3,4],ymm10[5],ymm5[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,3,1,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0,1,2],ymm4[3,4,5,6,7],ymm6[8,9,10],ymm4[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm15, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,4,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1],ymm3[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm9, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm2[0],ymm9[1],ymm2[2,3,4],ymm9[5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2],xmm9[3],xmm10[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm8[0,1],ymm2[2],ymm8[3,4],ymm2[5],ymm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm10, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,3,1,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm2, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm10, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm5, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm13 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm5 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm12 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm17 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm16 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm10, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm9[1,2],ymm7[3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm31 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm16 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm8, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm8, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm10[1,2],ymm7[3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm8
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm0, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm8, %zmm5 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm8 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm8 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm9 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm9 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm10, %zmm9 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm10 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm13, %zmm10 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm14, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm13, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm14, %zmm10 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm15, %zmm13 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm1 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm1 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm12, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 64(%rsi)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, (%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rcx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 64(%r8)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 64(%r8)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%r8)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 64(%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, (%r9)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm19, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm1 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm18, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm3 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm3 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm0 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $1768, %rsp # imm = 0x6E8
+; AVX512F-ONLY-FAST-NEXT:    addq $1736, %rsp # imm = 0x6C8
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
@@ -15096,34 +15086,35 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512DQ-FAST-LABEL: load_i16_stride7_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1288, %rsp # imm = 0x508
+; AVX512DQ-FAST-NEXT:    subq $1240, %rsp # imm = 0x4D8
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <2,5,9,u,12,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm18, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <2,5,9,u,12,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm4, %zmm17, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [8,1,12,5,12,5,14,15]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm4, %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm29
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa 480(%rdi), %ymm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm21
+; AVX512DQ-FAST-NEXT:    vmovdqa 480(%rdi), %ymm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdi), %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm15[2],ymm6[3,4,5],ymm15[6],ymm6[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm22
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm12[2],ymm6[3,4,5],ymm12[6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm19
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4],xmm2[5],xmm5[6],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,6,7,12,13,2,3,16,17,30,31,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vporq %ymm4, %ymm6, %ymm29
+; AVX512DQ-FAST-NEXT:    vporq %ymm4, %ymm6, %ymm28
 ; AVX512DQ-FAST-NEXT:    vmovdqa 672(%rdi), %xmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm20
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 700(%rdi), %xmm7
 ; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm24 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm19
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm19[0,1,0,2]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm22
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm22[0,1,0,2]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm6
@@ -15133,9 +15124,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
-; AVX512DQ-FAST-NEXT:    vporq %ymm1, %ymm2, %ymm28
-; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm1
+; AVX512DQ-FAST-NEXT:    vporq %ymm1, %ymm2, %ymm31
+; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm14, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm2
 ; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -15143,7 +15134,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm8, %ymm14
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm8, %ymm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm16
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1],xmm4[2,3,4,5],xmm1[6],xmm4[7]
@@ -15151,10 +15142,10 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm8[2],ymm1[3,4,5],ymm8[6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm8
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1],ymm0[2],ymm8[3,4,5],ymm0[6],ymm8[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
@@ -15163,177 +15154,181 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm9
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm6[0,1,2,3,4,5,6],ymm9[7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm13[0],xmm3[1],xmm13[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm13, %xmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm13[0],xmm14[1],xmm13[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm13, %xmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm10, %xmm10
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2],ymm15[3],ymm0[4,5],ymm15[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2],ymm12[3],ymm0[4,5],ymm12[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm25
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm9, %ymm7
-; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm18, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm17, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm17
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm0[2],ymm3[3,4,5],ymm0[6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512DQ-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm30
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm21[0,1,0,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm30[0,1,0,2]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3,4,5,6],ymm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 688(%rdi), %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, (%rsp) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0],xmm3[1],xmm12[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,6,9,u,13,u,u,u>
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 688(%rdi), %xmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm11[0],xmm1[1],xmm11[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,6,9,u,13,u,u,u>
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm10
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0],ymm14[1],ymm0[2,3],ymm14[4],ymm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1],xmm4[2,3,4,5],xmm5[6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm10[0],ymm15[1],ymm10[2,3],ymm15[4],ymm10[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm2[0],xmm5[1],xmm2[2,3,4,5],xmm5[6],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm21
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm4, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1,2],ymm8[3],ymm11[4,5],ymm8[6],ymm11[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm20
+; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm7
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm8, %ymm29
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3,4,5],xmm5[6],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm16
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [2,5,2,5,2,5,2,5]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm19, %ymm10, %ymm13
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm16 = [2,5,2,5,2,5,2,5]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm22, %ymm16, %ymm13
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0,1,2,3,4,5,6],ymm13[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm5
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm14
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm13, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0],ymm1[1],ymm15[2,3],ymm1[4],ymm15[5,6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2,3,4,5],xmm14[6],xmm13[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm15
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm12[0],ymm0[1],ymm12[2,3],ymm0[4],ymm12[5,6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1],xmm13[2,3,4,5],xmm15[6],xmm13[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm13, %ymm9
-; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm4, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm9, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2],ymm11[3],ymm13[4,5],ymm11[6],ymm13[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm8[0,1,2],ymm2[3],ymm8[4,5],ymm2[6],ymm8[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2,3,4,5],xmm2[6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermd %ymm21, %ymm10, %ymm4
+; AVX512DQ-FAST-NEXT:    vpermd %ymm30, %ymm16, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm12, %xmm22
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm7[1],ymm0[2,3,4],ymm7[5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0],xmm2[1],xmm6[2],xmm2[3],xmm6[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [10,3,6,15,12,13,6,15]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm16, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm9, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm9, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm8, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm8[1],ymm3[2,3],ymm8[4],ymm3[5,6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm9[1],xmm2[2,3,4,5],xmm9[6],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm4
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm10[0],ymm3[1],ymm10[2,3,4],ymm3[5],ymm10[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2],xmm2[3],xmm4[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [10,3,6,15,12,13,6,15]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm21, %zmm17, %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm9, %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vpor %ymm4, %ymm9, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5,6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm9
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm9[1],xmm4[2,3,4,5],xmm9[6],xmm4[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm14
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm15
 ; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm10 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm19[0,1,1,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm2, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3,4,5,6],ymm12[7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} xmm14 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm12, %zmm20
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm15[0],ymm1[1],ymm15[2,3,4],ymm1[5],ymm15[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm12[0],xmm5[1],xmm12[2],xmm5[3],xmm12[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm16, %zmm12
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm12, %ymm6
-; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm22[0,1,1,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm14
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} xmm15 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm14, %zmm21
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0],ymm0[1],ymm12[2,3,4],ymm0[5],ymm12[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm14[0],xmm5[1],xmm14[2],xmm5[3],xmm14[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermd %zmm20, %zmm17, %zmm13
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm2
+; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm5, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0],ymm13[1],ymm11[2,3],ymm13[4],ymm11[5,6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3,4,5],xmm6[6],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm6
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm21[0,1,1,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0],ymm8[1],ymm12[2,3],ymm8[4],ymm12[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3,4,5],xmm5[6],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm30[0,1,1,3]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm5, %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm9[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm23
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm8[1],ymm3[2,3,4],ymm8[5],ymm3[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm9[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm23
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm1[1],ymm3[2,3,4],ymm1[5],ymm3[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27,20,21,26,27]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5,6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm8
-; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm31, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm8
+; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm27, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm2, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm2, %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm18
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm26, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm19
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [3,6,10,13,3,6,10,13]
+; AVX512DQ-FAST-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm20, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5,6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm24, %zmm2, %zmm29 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0],ymm13[1],ymm11[2,3,4],ymm13[5],ymm11[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm24, %zmm2, %zmm28 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0],ymm7[1],ymm12[2,3,4],ymm7[5],ymm12[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
@@ -15341,187 +15336,181 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 680(%rdi), %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm4
-; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm22, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm11, %xmm7
+; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm11, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,3,3,3,0,3,7,7]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm19, %ymm13, %ymm1
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [0,3,3,3,0,3,7,7]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm22, %ymm28, %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm1, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm3
 ; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [2,5,9,12,2,5,9,12]
 ; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm5, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm5, %zmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6,7],ymm7[8,9,10,11,12],ymm2[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0,1,2,3,4],ymm2[5,6,7],ymm6[8,9,10,11,12],ymm2[13,14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm11, %xmm7
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm11, %xmm6
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm2, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 704(%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa 736(%rdi), %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1],ymm7[2,3],ymm9[4,5],ymm7[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm9, %ymm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm17
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0,1,2],xmm2[3],xmm9[4],xmm2[5],xmm9[6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm14 = [8,9,6,7,4,5,10,11,8,9,6,7,4,5,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm30
+; AVX512DQ-FAST-NEXT:    vmovdqa 704(%rdi), %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 736(%rdi), %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm17
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3],xmm6[4],xmm2[5],xmm6[6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm13 = [8,9,6,7,4,5,10,11,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <0,u,u,u,4,7,11,14>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm10, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm12, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0,1,2],xmm2[3,4,5,6],xmm12[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm26, %zmm12
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm12, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5,6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm28 {%k1} # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %ymm21, %ymm13, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm10, %zmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm14, %ymm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2],xmm2[3,4,5,6],xmm14[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm20, %zmm14
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm14, %ymm9
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm31 {%k1} # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %ymm30, %ymm28, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm5, %zmm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm5, %zmm9
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsp), %xmm7 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm7, %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm16 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm16, %xmm1
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
 ; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,4,7,0,0,4,7,0]
-; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm19, %ymm1, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm31 = [0,4,7,0,0,4,7,0]
+; AVX512DQ-FAST-NEXT:    # ymm31 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm22, %ymm31, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [2,6,9,13,2,6,9,13]
 ; AVX512DQ-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm4, %zmm3
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm6[5,6,7],ymm3[8,9,10,11,12],ymm6[13,14,15]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7],ymm3[8,9,10,11,12],ymm0[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm6
-; AVX512DQ-FAST-NEXT:    vpermd %zmm6, %zmm5, %zmm3
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm12 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm12
+; AVX512DQ-FAST-NEXT:    vpermd %zmm12, %zmm5, %zmm3
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31,16,17,22,23,24,25,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa 288(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm15
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm15[0,1,2],xmm8[3],xmm15[4],xmm8[5],xmm15[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm9[2,3],ymm2[4,5],ymm9[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm14[0,1,2],xmm8[3],xmm14[4],xmm8[5],xmm14[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm28
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm10, %zmm10
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm10, %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3,4,5,6],xmm9[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm9
-; AVX512DQ-FAST-NEXT:    vpermd %zmm9, %zmm5, %zmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm5[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm10, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm6[0,1,2],xmm8[3,4,5,6],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm8
+; AVX512DQ-FAST-NEXT:    vpermd %zmm8, %zmm5, %zmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm5[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm13
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1,2],ymm9[3],ymm2[4,5],ymm9[6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm11
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm8 = [10,11,8,9,6,7,12,13,10,11,8,9,6,7,12,13]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <1,u,u,u,4,8,11,15>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm14, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm10, %ymm10
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4],xmm6[5],xmm5[6],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [10,11,8,9,6,7,12,13,10,11,8,9,6,7,12,13]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <1,u,u,u,4,8,11,15>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm13, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm10, %ymm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm10[0,1,2],xmm5[3,4,5,6],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm9, %zmm4, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermd %zmm8, %zmm4, %zmm10
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29,18,19,20,21,26,27,28,29]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm10, %ymm10
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm10[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm10[0,1,2,3],xmm5[4],xmm10[5],xmm5[6],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm14, %zmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3,4,5,6],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm6, %zmm4, %zmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm13, %zmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3,4,5,6],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm12, %zmm4, %zmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm13[2],ymm11[3,4,5],ymm13[6],ymm11[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm16
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm11[2],ymm9[3,4,5],ymm11[6],ymm9[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm14
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4],xmm0[5],xmm5[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,u,u,u,5,8,12,15>
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm10, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpor %ymm5, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm9, %zmm26, %zmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermd %zmm8, %zmm20, %zmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm27 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm27 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm5 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm2[2],ymm3[3,4,5],ymm2[6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm3
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm9[4],xmm0[5],xmm9[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm10, %zmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm8, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm6, %zmm26, %zmm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %ymm21, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm8[4],xmm0[5],xmm8[6],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm10, %zmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm12, %zmm20, %zmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %ymm30, %ymm31, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm4, %zmm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm4, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm1
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 416(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm14
+; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm1[2],ymm15[3,4,5],ymm1[6],ymm15[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5],xmm6[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm15[0,1,2],ymm4[3],ymm15[4,5],ymm4[6],ymm15[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm9, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm9[0,1,2],ymm14[3],ymm9[4,5],ymm14[6],ymm9[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2,3,4,5],xmm6[6],xmm8[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
@@ -15535,21 +15524,21 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm10, %ymm6
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm20 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm20 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm20, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 864(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 832(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm21 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm21 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 864(%rdi), %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa 832(%rdi), %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm19
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm10[4],xmm0[5],xmm10[6],xmm0[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm9, %zmm8
+; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm9, %zmm8
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm7
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1,2],ymm2[3],ymm7[4,5],ymm2[6],ymm7[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -15557,72 +15546,68 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm8, %ymm9, %ymm8
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm23 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm23 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm23 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm5[2],ymm7[3,4],ymm5[5],ymm7[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm5[2],ymm13[3,4],ymm5[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm8
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = <0,3,7,10,14,u,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm23, %zmm27, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,3,7,10,14,u,u,u>
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm3, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm12
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm12[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm29, %zmm25
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm14[3],ymm13[4,5],ymm14[6],ymm13[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm14, %ymm20
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm27
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2],ymm1[3],ymm15[4,5],ymm1[6],ymm15[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm12
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1],xmm12[2,3,4,5],xmm0[6],xmm12[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0],ymm15[1],ymm4[2,3],ymm15[4],ymm4[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm17
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm8
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0],ymm11[1],ymm14[2,3],ymm11[4],ymm14[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <2,u,u,u,6,9,13,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm15, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm30 = <2,u,u,u,6,9,13,u>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm30, %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm10, %ymm12, %ymm10
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm25 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm27 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm9[2],ymm6[3,4],ymm9[5],ymm6[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm16
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm10
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm27, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm3, %zmm10
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm31
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm10[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm29, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1],xmm10[2,3,4,5],xmm0[6],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm15, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm30, %zmm10
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm3[0],ymm11[1],ymm3[2,3],ymm11[4],ymm3[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm14
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0],ymm7[1],ymm14[2,3],ymm7[4],ymm14[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm19
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm12
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1],xmm10[2,3,4,5],xmm12[6],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -15630,31 +15615,29 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm10, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm31 {%k1}
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm21
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm26 {%k1}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm5[2,3],ymm13[4,5],ymm5[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm21
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm25
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm9, %xmm24
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = <0,4,7,11,14,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm23, %zmm22, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm12[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm5[0],ymm14[1],ymm5[2,3],ymm14[4],ymm5[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <0,4,7,11,14,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm17, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm30
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm12, %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0,1],ymm12[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm0[0],ymm15[1],ymm0[2,3],ymm15[4],ymm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm15, %ymm18
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm15[1],xmm12[2,3,4,5],xmm15[6],xmm12[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm7[0],ymm13[1],ymm7[2,3,4],ymm13[5],ymm7[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm8[0],ymm11[1],ymm8[2,3,4],ymm11[5],ymm8[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm20
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm8, %ymm11
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm15[1],xmm8[2],xmm15[3],xmm8[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
@@ -15662,38 +15645,35 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <3,u,u,u,6,10,13,u>
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm1[0,1,2],ymm12[3,4,5,6,7],ymm1[8,9,10],ymm12[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm8, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm0 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm9[2,3],ymm6[4,5],ymm9[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm20
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm9, %ymm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm5 {%k1}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm23
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm25, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm22, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm8
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm17, %zmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1],ymm8[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm24
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2,3,4,5],xmm8[6],xmm1[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm16, %zmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm15
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0],ymm11[1],ymm3[2,3,4],ymm11[5],ymm3[6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm16, %zmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0],ymm3[1],ymm14[2,3,4],ymm3[5],ymm14[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2],xmm8[3],xmm10[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
@@ -15701,27 +15681,28 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm8, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm9 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm9 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm9 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1,2],ymm1[3],ymm13[4,5],ymm1[6],ymm13[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,4,8,11,15,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm23, %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm10, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm13
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm2[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0],ymm14[1],ymm5[2,3,4],ymm14[5],ymm5[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm13[2],ymm7[3,4],ymm13[5],ymm7[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm0[2],ymm11[3,4],ymm0[5],ymm11[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm4
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,3,1,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
@@ -15735,39 +15716,39 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7],ymm4[8,9,10],ymm1[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm12 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm12 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm12 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm6[3],ymm1[4,5],ymm6[6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1,2],ymm4[3],ymm6[4,5],ymm4[6],ymm6[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm10, %zmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm10, %zmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm4, %ymm4
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0],ymm4[1],ymm0[2,3,4],ymm4[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3,4],ymm6[5],ymm4[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm15[2],ymm3[3,4],ymm15[5],ymm3[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm15[0,1],ymm3[2],ymm15[3,4],ymm3[5],ymm15[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm3
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,1,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm7, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm7, %zmm7
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm7, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1,2],ymm2[3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm0, %zmm1 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
@@ -15785,23 +15766,21 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, (%rdx)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 64(%rcx)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%rcx)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, (%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 64(%r8)
-; AVX512DQ-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 64(%r9)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%r8)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, 64(%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, (%r9)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, (%rax)
-; AVX512DQ-FAST-NEXT:    addq $1288, %rsp # imm = 0x508
+; AVX512DQ-FAST-NEXT:    addq $1240, %rsp # imm = 0x4D8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
index c7fca67c75aea..809b0e45f5c31 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
@@ -3843,123 +3843,133 @@ define void @load_i16_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm7
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm8, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm8, %zmm10
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm7, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm5, %zmm9
 ; AVX512BW-NEXT:    movb $-64, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm3, %zmm9
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512BW-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,8,16,24,32,40,48,56]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm9, %zmm16
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
 ; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm9, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm11
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm9, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm3, %zmm10
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm11, %zmm9
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm7, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm5, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm11
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
 ; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm10, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm10, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512BW-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm3, %zmm11
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm10
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm12, %zmm10
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm7, %zmm10
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm5, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [2,10,18,26,34,42,50,58]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm12
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
 ; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm12
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm11, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm13
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm11, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm3, %zmm12
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm7, %zmm11
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm5, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512BW-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm11
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [3,11,19,27,35,43,51,59]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm12, %zmm11
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
 ; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm13
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm12, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm12, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm3, %zmm13
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm12
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm14, %zmm12
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm7, %zmm12
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm5, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm12
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,12,20,28,36,44,52,60]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm14
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm13, %zmm12
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
 ; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm13, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm13, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm3, %zmm14
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm13
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm7, %zmm13
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm5, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm15 = [5,13,21,29,37,45,53,61]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm15
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm14, %zmm13
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
 ; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm14, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm14, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512BW-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm3, %zmm15
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm14
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm7, %zmm14
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
 ; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm15, %zmm6
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm15, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512BW-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm3, %zmm5
-; AVX512BW-NEXT:    vpermt2w %zmm1, %zmm15, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rsi)
+; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm5, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm14
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14,22,30,38,46,54,62]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm7, %zmm14
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2w %zmm4, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm6 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,15,23,31,39,47,55,63]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rcx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%r8)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%r11)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%r10)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%r10)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -8746,282 +8756,286 @@ define void @load_i16_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512BW-LABEL: load_i16_stride8_vf64:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-NEXT:    vpermt2w %zmm20, %zmm0, %zmm2
-; AVX512BW-NEXT:    movb $-64, %al
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2w %zmm9, %zmm1, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm0, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-NEXT:    vpermt2w %zmm21, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-NEXT:    vpermt2w %zmm22, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512BW-NEXT:    vpermt2w %zmm20, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-NEXT:    vpermt2w %zmm9, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm4, %zmm2
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
+; AVX512BW-NEXT:    subq $1288, %rsp # imm = 0x508
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm17
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm28
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm20
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm2
+; AVX512BW-NEXT:    vpermt2w %zmm23, %zmm8, %zmm2
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm0
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm7, %zmm0
+; AVX512BW-NEXT:    vpermi2w %zmm12, %zmm17, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm14
+; AVX512BW-NEXT:    vpermt2w %zmm23, %zmm1, %zmm14
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm9
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm21, %zmm9
+; AVX512BW-NEXT:    vpermi2w %zmm12, %zmm17, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2w %zmm22, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512BW-NEXT:    vpermt2w %zmm23, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm3
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm19, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2w %zmm12, %zmm17, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-NEXT:    vpermt2w %zmm22, %zmm10, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2w %zmm22, %zmm15, %zmm1
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512BW-NEXT:    vpermt2w %zmm23, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm26
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm18, %zmm26
+; AVX512BW-NEXT:    vpermi2w %zmm12, %zmm17, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2w %zmm22, %zmm13, %zmm1
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512BW-NEXT:    vpermt2w %zmm23, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm25
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm22, %zmm25
+; AVX512BW-NEXT:    vpermi2w %zmm12, %zmm17, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512BW-NEXT:    vpermt2w %zmm23, %zmm30, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
+; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512BW-NEXT:    vpermt2w %zmm23, %zmm29, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
 ; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2w %zmm22, %zmm1, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512BW-NEXT:    vpermt2w %zmm20, %zmm0, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512BW-NEXT:    vpermt2w %zmm20, %zmm3, %zmm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512BW-NEXT:    vpermt2w %zmm20, %zmm10, %zmm31
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-NEXT:    vpermt2w %zmm20, %zmm15, %zmm30
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512BW-NEXT:    vpermt2w %zmm20, %zmm13, %zmm22
-; AVX512BW-NEXT:    vpermt2w %zmm20, %zmm1, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm0, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm3, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm10, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm15, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm13, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm1, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512BW-NEXT:    vpermt2w %zmm21, %zmm4, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512BW-NEXT:    vpermt2w %zmm21, %zmm0, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-NEXT:    vpermt2w %zmm21, %zmm3, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512BW-NEXT:    vpermt2w %zmm21, %zmm10, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512BW-NEXT:    vpermt2w %zmm21, %zmm15, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512BW-NEXT:    vpermt2w %zmm21, %zmm13, %zmm17
-; AVX512BW-NEXT:    vpermt2w %zmm21, %zmm1, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm4, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm0, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm6, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm10, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm15, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm13, %zmm15
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm1, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm4
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm3
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm10
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2w %zmm23, %zmm1, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm23
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm27, %zmm23
+; AVX512BW-NEXT:    vpermi2w %zmm12, %zmm17, %zmm30
+; AVX512BW-NEXT:    vpermi2w %zmm12, %zmm17, %zmm29
+; AVX512BW-NEXT:    vpermt2w %zmm12, %zmm1, %zmm17
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
+; AVX512BW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm24
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm31, %zmm24
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm1, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm15
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm15, %zmm7
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm15, %zmm21
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm15, %zmm19
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm15, %zmm18
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm15, %zmm22
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm15, %zmm27
+; AVX512BW-NEXT:    vpermi2w %zmm3, %zmm15, %zmm31
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm1, %zmm15
+; AVX512BW-NEXT:    movb $-64, %al
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm11
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm3
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm4, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,8,16,24,32,40,48,56]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm10, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm12
+; AVX512BW-NEXT:    vpermi2w %zmm16, %zmm12, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm8
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm7, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm9 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm10
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm14, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm7, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm9, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm9, %zmm11
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm10, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512BW-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm5, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm13
-; AVX512BW-NEXT:    vpermt2w %zmm28, %zmm1, %zmm0
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512BW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
+; AVX512BW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
 ; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2w %zmm16, %zmm12, %zmm14
+; AVX512BW-NEXT:    vpermi2w %zmm16, %zmm12, %zmm9
+; AVX512BW-NEXT:    vpermi2w %zmm16, %zmm12, %zmm10
+; AVX512BW-NEXT:    vpermi2w %zmm16, %zmm12, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2w %zmm16, %zmm12, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2w %zmm16, %zmm12, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2w %zmm16, %zmm0, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm7
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm21, %zmm21
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm9, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512BW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm3, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512BW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm6, %zmm8
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm9, %zmm11
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm12, %zmm14
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512BW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm19, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm6
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512BW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2w %zmm28, %zmm19, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [2,10,18,26,34,42,50,58]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm26 {%k1}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [3,11,19,27,35,43,51,59]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm4, %zmm7
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm7 = ymm7[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm26, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm18 {%k1}
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm18, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,12,20,28,36,44,52,60]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm10
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm9, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [5,13,21,29,37,45,53,61]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm11, %zmm14
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [6,14,22,30,38,46,54,62]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [7,15,23,31,39,47,55,63]
+; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm18, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm9
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm11
+; AVX512BW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm3
+; AVX512BW-NEXT:    vpermt2w %zmm6, %zmm18, %zmm8
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm1 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm1 = ymm10[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm25, %zmm2
+; AVX512BW-NEXT:    vpblendd $240, (%rsp), %ymm9, %ymm5 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm5 = ymm9[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm22, %zmm5
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm9 = ymm14[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm23 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm23, %zmm9
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm10 = ymm11[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm27 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm27, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm24 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm24, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm31 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm31, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm28 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm11 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm11 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm28, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm15, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm8, 64(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm8, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, 64(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm8, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 64(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, 64(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512BW-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%rax)
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512BW-NEXT:    addq $1288, %rsp # imm = 0x508
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <512 x i16>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
index a08234ffb482f..873192d4cb934 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
@@ -749,27 +749,28 @@ define void @load_i32_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm3
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28]
 ; AVX512-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm4, %zmm5
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm4
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm4
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,4,8,12,16,20,24,28]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm5
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm4[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,5,9,13,17,21,25,29,1,5,9,13,17,21,25,29]
 ; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm5, %zmm6
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm5
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,5,9,13,17,21,25,29]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm6
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,6,10,14,18,22,26,30,2,6,10,14,18,22,26,30]
 ; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm6, %zmm7
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm6
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm7[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm7 = [2,6,10,14,18,22,26,30]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm7
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm7[0,1,2,3],zmm6[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [3,7,11,15,19,23,27,31,3,7,11,15,19,23,27,31]
 ; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm7, %zmm2
-; AVX512-NEXT:    vpermt2d %zmm1, %zmm7, %zmm0
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm7
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [3,7,11,15,19,23,27,31]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm7[4,5,6,7]
 ; AVX512-NEXT:    vmovdqa64 %zmm4, (%rsi)
 ; AVX512-NEXT:    vmovdqa64 %zmm5, (%rdx)
 ; AVX512-NEXT:    vmovdqa64 %zmm6, (%rcx)
@@ -1580,63 +1581,64 @@ define void @load_i32_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512-LABEL: load_i32_stride4_vf32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm3
-; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm6
-; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm7
+; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm3
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm5
+; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm6
+; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm7
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28]
 ; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512-NEXT:    vpermt2d %zmm6, %zmm8, %zmm9
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm8, %zmm10
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm9[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm8, %zmm10
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm10[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm9
+; AVX512-NEXT:    vpermt2d %zmm7, %zmm8, %zmm9
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,4,8,12,16,20,24,28]
+; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512-NEXT:    vpermt2d %zmm5, %zmm10, %zmm11
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm11[0,1,2,3],zmm9[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm8
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm10
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm10[0,1,2,3],zmm8[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,5,9,13,17,21,25,29,1,5,9,13,17,21,25,29]
 ; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm11
-; AVX512-NEXT:    vpermt2d %zmm6, %zmm10, %zmm11
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm12
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm10, %zmm12
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm12[0,1,2,3],zmm11[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm10, %zmm12
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm10
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm12[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm11
+; AVX512-NEXT:    vpermt2d %zmm7, %zmm10, %zmm11
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm12 = [1,5,9,13,17,21,25,29]
+; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm13
+; AVX512-NEXT:    vpermt2d %zmm5, %zmm12, %zmm13
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm13[0,1,2,3],zmm11[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm10
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm12
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm12[0,1,2,3],zmm10[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [2,6,10,14,18,22,26,30,2,6,10,14,18,22,26,30]
 ; AVX512-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512-NEXT:    vpermt2d %zmm6, %zmm12, %zmm13
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm14
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm12, %zmm14
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm14[0,1,2,3],zmm13[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm12, %zmm14
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm12
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm12[0,1,2,3],zmm14[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm13
+; AVX512-NEXT:    vpermt2d %zmm7, %zmm12, %zmm13
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm14 = [2,6,10,14,18,22,26,30]
+; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm15
+; AVX512-NEXT:    vpermt2d %zmm5, %zmm14, %zmm15
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm15[0,1,2,3],zmm13[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm3, %zmm12
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm14
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm14[0,1,2,3],zmm12[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [3,7,11,15,19,23,27,31,3,7,11,15,19,23,27,31]
 ; AVX512-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermt2d %zmm6, %zmm14, %zmm7
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm14, %zmm5
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm7[4,5,6,7]
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm14, %zmm2
-; AVX512-NEXT:    vpermt2d %zmm1, %zmm14, %zmm0
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm9, 64(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm8, (%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm11, 64(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm10, (%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm13, 64(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm12, (%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm4, 64(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm0, (%r8)
+; AVX512-NEXT:    vpermt2d %zmm7, %zmm14, %zmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm7 = [3,7,11,15,19,23,27,31]
+; AVX512-NEXT:    vpermt2d %zmm5, %zmm7, %zmm4
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm6[4,5,6,7]
+; AVX512-NEXT:    vpermt2d %zmm2, %zmm14, %zmm3
+; AVX512-NEXT:    vpermt2d %zmm0, %zmm7, %zmm1
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm3[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm8, 64(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm9, (%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm10, 64(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm11, (%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm12, 64(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm13, (%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm0, 64(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm4, (%r8)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %wide.vec = load <128 x i32>, ptr %in.vec, align 64
@@ -3308,115 +3310,116 @@ define void @load_i32_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512-LABEL: load_i32_stride4_vf64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm3
-; AVX512-NEXT:    vmovdqa64 832(%rdi), %zmm4
-; AVX512-NEXT:    vmovdqa64 768(%rdi), %zmm5
-; AVX512-NEXT:    vmovdqa64 960(%rdi), %zmm10
-; AVX512-NEXT:    vmovdqa64 896(%rdi), %zmm7
-; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm14
-; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm15
+; AVX512-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 768(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa64 960(%rdi), %zmm3
+; AVX512-NEXT:    vmovdqa64 896(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm6
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm8
+; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm9
+; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm12
+; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm13
+; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm14
 ; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm17
 ; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm16
-; AVX512-NEXT:    vmovdqa64 576(%rdi), %zmm8
-; AVX512-NEXT:    vmovdqa64 512(%rdi), %zmm9
-; AVX512-NEXT:    vmovdqa64 704(%rdi), %zmm12
-; AVX512-NEXT:    vmovdqa64 640(%rdi), %zmm11
+; AVX512-NEXT:    vmovdqa64 576(%rdi), %zmm4
+; AVX512-NEXT:    vmovdqa64 512(%rdi), %zmm5
+; AVX512-NEXT:    vmovdqa64 704(%rdi), %zmm10
+; AVX512-NEXT:    vmovdqa64 640(%rdi), %zmm7
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28]
 ; AVX512-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm6
-; AVX512-NEXT:    vpermt2d %zmm12, %zmm19, %zmm6
-; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512-NEXT:    vpermt2d %zmm8, %zmm19, %zmm13
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm13[0,1,2,3],zmm6[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm13
-; AVX512-NEXT:    vpermt2d %zmm17, %zmm19, %zmm13
-; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm18
-; AVX512-NEXT:    vpermt2d %zmm14, %zmm19, %zmm18
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm18[0,1,2,3],zmm13[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512-NEXT:    vpermt2d %zmm10, %zmm19, %zmm18
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm20
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm19, %zmm20
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm20[0,1,2,3],zmm18[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm20
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm19, %zmm20
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm19
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm19[0,1,2,3],zmm20[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm11
+; AVX512-NEXT:    vpermt2d %zmm10, %zmm19, %zmm11
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [0,4,8,12,16,20,24,28]
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm15
+; AVX512-NEXT:    vpermt2d %zmm4, %zmm20, %zmm15
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm15[0,1,2,3],zmm11[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm15
+; AVX512-NEXT:    vpermt2d %zmm17, %zmm19, %zmm15
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm18
+; AVX512-NEXT:    vpermt2d %zmm13, %zmm20, %zmm18
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm15 = zmm18[0,1,2,3],zmm15[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm18
+; AVX512-NEXT:    vpermt2d %zmm12, %zmm19, %zmm18
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm21
+; AVX512-NEXT:    vpermt2d %zmm8, %zmm20, %zmm21
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm21[0,1,2,3],zmm18[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm19
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm20
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm20[0,1,2,3],zmm19[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [1,5,9,13,17,21,25,29,1,5,9,13,17,21,25,29]
 ; AVX512-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm20
-; AVX512-NEXT:    vpermt2d %zmm12, %zmm21, %zmm20
-; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm22
-; AVX512-NEXT:    vpermt2d %zmm8, %zmm21, %zmm22
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm22[0,1,2,3],zmm20[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm22
-; AVX512-NEXT:    vpermt2d %zmm17, %zmm21, %zmm22
-; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm23
-; AVX512-NEXT:    vpermt2d %zmm14, %zmm21, %zmm23
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],zmm22[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512-NEXT:    vpermt2d %zmm10, %zmm21, %zmm23
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm24
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm21, %zmm24
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm20
+; AVX512-NEXT:    vpermt2d %zmm10, %zmm21, %zmm20
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [1,5,9,13,17,21,25,29]
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm23
+; AVX512-NEXT:    vpermt2d %zmm4, %zmm22, %zmm23
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm23[0,1,2,3],zmm20[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm23
+; AVX512-NEXT:    vpermt2d %zmm17, %zmm21, %zmm23
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm24
+; AVX512-NEXT:    vpermt2d %zmm13, %zmm22, %zmm24
 ; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm24[0,1,2,3],zmm23[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm24
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm21, %zmm24
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm21
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],zmm24[4,5,6,7]
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [2,6,10,14,18,22,26,30,2,6,10,14,18,22,26,30]
-; AVX512-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm25
-; AVX512-NEXT:    vpermt2d %zmm12, %zmm24, %zmm25
-; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm26
-; AVX512-NEXT:    vpermt2d %zmm8, %zmm24, %zmm26
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm26[0,1,2,3],zmm25[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm26
-; AVX512-NEXT:    vpermt2d %zmm17, %zmm24, %zmm26
-; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm27
-; AVX512-NEXT:    vpermt2d %zmm14, %zmm24, %zmm27
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm26 = zmm27[0,1,2,3],zmm26[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm27
-; AVX512-NEXT:    vpermt2d %zmm10, %zmm24, %zmm27
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm28
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm24, %zmm28
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm24
+; AVX512-NEXT:    vpermt2d %zmm12, %zmm21, %zmm24
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm25
+; AVX512-NEXT:    vpermt2d %zmm8, %zmm22, %zmm25
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm25[0,1,2,3],zmm24[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm21
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm22
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm22[0,1,2,3],zmm21[4,5,6,7]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [2,6,10,14,18,22,26,30,2,6,10,14,18,22,26,30]
+; AVX512-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm25
+; AVX512-NEXT:    vpermt2d %zmm10, %zmm22, %zmm25
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [2,6,10,14,18,22,26,30]
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm27
+; AVX512-NEXT:    vpermt2d %zmm4, %zmm26, %zmm27
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm27[0,1,2,3],zmm25[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm27
+; AVX512-NEXT:    vpermt2d %zmm17, %zmm22, %zmm27
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm28
+; AVX512-NEXT:    vpermt2d %zmm13, %zmm26, %zmm28
 ; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm28[0,1,2,3],zmm27[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm28
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm24, %zmm28
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm24
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm24[0,1,2,3],zmm28[4,5,6,7]
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [3,7,11,15,19,23,27,31,3,7,11,15,19,23,27,31]
-; AVX512-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermt2d %zmm17, %zmm28, %zmm16
-; AVX512-NEXT:    vpermt2d %zmm14, %zmm28, %zmm15
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm15[0,1,2,3],zmm16[4,5,6,7]
-; AVX512-NEXT:    vpermt2d %zmm10, %zmm28, %zmm7
-; AVX512-NEXT:    vpermt2d %zmm4, %zmm28, %zmm5
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm7[4,5,6,7]
-; AVX512-NEXT:    vpermt2d %zmm12, %zmm28, %zmm11
-; AVX512-NEXT:    vpermt2d %zmm8, %zmm28, %zmm9
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm9[0,1,2,3],zmm11[4,5,6,7]
-; AVX512-NEXT:    vpermt2d %zmm3, %zmm28, %zmm2
-; AVX512-NEXT:    vpermt2d %zmm1, %zmm28, %zmm0
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm18, 192(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm19, (%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm13, 64(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm6, 128(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm23, 192(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm21, (%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm22, 64(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm28
+; AVX512-NEXT:    vpermt2d %zmm12, %zmm22, %zmm28
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512-NEXT:    vpermt2d %zmm8, %zmm26, %zmm29
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm29[0,1,2,3],zmm28[4,5,6,7]
+; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm22
+; AVX512-NEXT:    vpermi2d %zmm0, %zmm1, %zmm26
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm26[0,1,2,3],zmm22[4,5,6,7]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [3,7,11,15,19,23,27,31,3,7,11,15,19,23,27,31]
+; AVX512-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermt2d %zmm17, %zmm26, %zmm16
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [3,7,11,15,19,23,27,31]
+; AVX512-NEXT:    vpermt2d %zmm13, %zmm17, %zmm14
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm14[0,1,2,3],zmm16[4,5,6,7]
+; AVX512-NEXT:    vpermt2d %zmm12, %zmm26, %zmm9
+; AVX512-NEXT:    vpermt2d %zmm8, %zmm17, %zmm6
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm9[4,5,6,7]
+; AVX512-NEXT:    vpermt2d %zmm3, %zmm26, %zmm2
+; AVX512-NEXT:    vpermt2d %zmm0, %zmm17, %zmm1
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm2[4,5,6,7]
+; AVX512-NEXT:    vpermt2d %zmm10, %zmm26, %zmm7
+; AVX512-NEXT:    vpermt2d %zmm4, %zmm17, %zmm5
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm7[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm19, 192(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm18, (%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm15, 64(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm11, 128(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm21, 192(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm24, (%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm23, 64(%rdx)
 ; AVX512-NEXT:    vmovdqa64 %zmm20, 128(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm27, 192(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm26, 64(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm28, (%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm27, 64(%rcx)
 ; AVX512-NEXT:    vmovdqa64 %zmm25, 128(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm5, 128(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm4, 192(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm0, (%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm14, 64(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm1, 128(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm0, 192(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm6, (%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm13, 64(%r8)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %wide.vec = load <256 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index 5d3f85d6ab692..c11fb0dbbbba3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -9553,11 +9553,11 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm21
 ; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm3
 ; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm18
 ; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm27
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm25
 ; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm4
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,6,12,0,0,0,20,26,0,6,12,0,0,0,20,26]
@@ -9567,111 +9567,111 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7
 ; AVX512F-NEXT:    vpermt2d %zmm25, %zmm6, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7
 ; AVX512F-NEXT:    vpermt2d %zmm27, %zmm6, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm7
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm5, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm5, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm7
 ; AVX512F-NEXT:    vpermt2d %zmm20, %zmm6, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermi2d %zmm21, %zmm1, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm20, %zmm6, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
 ; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm7, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm27, %zmm6, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm27, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm25, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm21, %zmm1, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm7
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm7
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm21, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm7
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm7
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm21, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm27, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm21, %zmm1, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm4, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm21, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm4, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm21, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm27, %zmm6, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm27, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm25, %zmm7, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm27, %zmm0, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2d %zmm25, %zmm0, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm7, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm21, %zmm1, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm20, %zmm6, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm21, %zmm1, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2d %zmm21, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm8, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm7, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [18,24,30,0,0,0,6,12,18,24,30,0,0,0,6,12]
 ; AVX512F-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
 ; AVX512F-NEXT:    vpermt2d %zmm18, %zmm31, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [19,25,31,0,0,1,7,13,19,25,31,0,0,1,7,13]
 ; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
 ; AVX512F-NEXT:    vpermt2d %zmm18, %zmm25, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
-; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm3, %zmm0
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
+; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm4, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [21,27,0,0,0,3,9,15,21,27,0,0,0,3,9,15]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm26
 ; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
 ; AVX512F-NEXT:    vpermt2d %zmm26, %zmm5, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm26, %zmm8, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm26, %zmm7, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm2
 ; AVX512F-NEXT:    vpermt2d %zmm1, %zmm31, %zmm2
@@ -9680,7 +9680,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2d %zmm1, %zmm25, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm1, %zmm3, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm4, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2d %zmm1, %zmm0, %zmm26
 ; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm22
@@ -9692,22 +9692,22 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm2
 ; AVX512F-NEXT:    vpermi2d %zmm19, %zmm2, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm8, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm19, %zmm2, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm7, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm19, %zmm2, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm28
 ; AVX512F-NEXT:    vpermt2d %zmm1, %zmm31, %zmm28
 ; AVX512F-NEXT:    vpermi2d %zmm2, %zmm19, %zmm31
 ; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm29
 ; AVX512F-NEXT:    vpermt2d %zmm1, %zmm25, %zmm29
 ; AVX512F-NEXT:    vpermi2d %zmm2, %zmm19, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm1, %zmm3, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm19, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm4, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm19, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2d %zmm2, %zmm0, %zmm19
 ; AVX512F-NEXT:    vpermt2d %zmm1, %zmm0, %zmm22
 ; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm2
@@ -9903,11 +9903,11 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm21
 ; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm3
 ; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm18
 ; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm27
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm25
 ; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm4
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,6,12,0,0,0,20,26,0,6,12,0,0,0,20,26]
@@ -9917,111 +9917,111 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm7
 ; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm6, %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7
 ; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm6, %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm7
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm5, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm5, %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm7
 ; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm6, %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermi2d %zmm21, %zmm1, %zmm6
 ; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,3,9,15,21,27,0,0,0,3,9,15,21,27]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm6, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
 ; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm7, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm6, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,13,0,0,0,21,27,1,7,13,0,0,0,21,27]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm21, %zmm1, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm7
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm7
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm21, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm7
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm7
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm21, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm21, %zmm1, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,20,26,0,6,12,0,0,0,20,26,0,6,12]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm21, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,21,27,1,7,13,0,0,0,21,27,1,7,13]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm21, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,0,0,6,12,18,24,30,0,0,0,6,12,18,24,30]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm6, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,1,7,13,19,25,31,0,0,1,7,13,19,25,31]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm7, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm0, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm0, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm7, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm21, %zmm1, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm6, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm21, %zmm1, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm8, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm7, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [18,24,30,0,0,0,6,12,18,24,30,0,0,0,6,12]
 ; AVX512BW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
 ; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm31, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [19,25,31,0,0,1,7,13,19,25,31,0,0,1,7,13]
 ; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
 ; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm25, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
-; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm3, %zmm0
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [20,26,0,0,0,2,8,14,20,26,0,0,0,2,8,14]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm4, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [21,27,0,0,0,3,9,15,21,27,0,0,0,3,9,15]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm26
 ; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpermt2d %zmm26, %zmm5, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm26, %zmm8, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm26, %zmm7, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm2
 ; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm31, %zmm2
@@ -10030,7 +10030,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm25, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm3, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm4, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm0, %zmm26
 ; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm22
@@ -10042,22 +10042,22 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm2
 ; AVX512BW-NEXT:    vpermi2d %zmm19, %zmm2, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm8, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm19, %zmm2, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm7, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm19, %zmm2, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm28
 ; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm31, %zmm28
 ; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm19, %zmm31
 ; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm29
 ; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm25, %zmm29
 ; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm19, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm3, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm19, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm4, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm19, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm0, %zmm19
 ; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm0, %zmm22
 ; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
index 705fda41f71e1..d4a3d10e504a4 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
@@ -171,12 +171,10 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512-FAST-NEXT:    vmovdqa (%rdi), %ymm6
 ; AVX512-FAST-NEXT:    vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [13,4,6,7,13,4,6,7]
-; AVX512-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [13,4,13,4,13,4,13,4]
 ; AVX512-FAST-NEXT:    vpermi2d %ymm6, %ymm1, %ymm7
 ; AVX512-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [6,13,6,7,6,13,6,7]
-; AVX512-FAST-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm8 = [6,13,6,13,6,13,6,13]
 ; AVX512-FAST-NEXT:    vpermi2d %ymm1, %ymm6, %ymm8
 ; AVX512-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm1
 ; AVX512-FAST-NEXT:    vmovq %xmm2, (%rsi)
@@ -2439,115 +2437,118 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm7
 ; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm7
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm8, %zmm7, %zmm6
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,7,14,21,28,u,u,u>
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm5
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,7,14,21,28,u,u,u>
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    movw $992, %di # imm = 0x3E0
 ; AVX512F-NEXT:    kmovw %edi, %k1
-; AVX512F-NEXT:    vmovdqa32 %zmm6, %zmm3 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512F-NEXT:    vmovdqa32 %zmm8, %zmm2 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm8
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
 ; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm6, %zmm9
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm8, %zmm9
 ; AVX512F-NEXT:    movb $-32, %dil
 ; AVX512F-NEXT:    kmovw %edi, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm9
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm7, %zmm8, %zmm10
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
-; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm7, %zmm8, %zmm11
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
-; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm7, %zmm8, %zmm12
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
-; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm8, %zmm7, %zmm13
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm6, %zmm8
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm14 = <1,8,15,22,29,u,u,u>
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm0, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = <1,8,15,22,29,u,u,u>
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
 ; AVX512F-NEXT:    movw $480, %di # imm = 0x1E0
 ; AVX512F-NEXT:    kmovw %edi, %k2
-; AVX512F-NEXT:    vmovdqa32 %zmm8, %zmm14 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm8
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm8, %zmm15
+; AVX512F-NEXT:    vmovdqa32 %zmm9, %zmm8 {%k2}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm9
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm9, %zmm10
 ; AVX512F-NEXT:    movw $-512, %di # imm = 0xFE00
 ; AVX512F-NEXT:    kmovw %edi, %k1
-; AVX512F-NEXT:    vmovdqa32 %zmm15, %zmm14 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm8, %zmm9
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = <18,25,0,7,14,u,u,u>
-; AVX512F-NEXT:    vpermi2d %zmm0, %zmm2, %zmm7
-; AVX512F-NEXT:    vmovdqa32 %zmm9, %zmm7 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
-; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm9
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm9, %zmm15
-; AVX512F-NEXT:    vmovdqa32 %zmm15, %zmm7 {%k1}
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <19,26,1,8,15,u,u,u>
-; AVX512F-NEXT:    vpermi2d %zmm0, %zmm2, %zmm9
+; AVX512F-NEXT:    vmovdqa32 %zmm10, %zmm8 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <18,25,0,7,14,u,u,u>
+; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm9
 ; AVX512F-NEXT:    vmovdqa32 %zmm10, %zmm9 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
 ; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm10
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm10, %zmm15
-; AVX512F-NEXT:    vmovdqa32 %zmm15, %zmm9 {%k1}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm10
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm10, %zmm15
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11,18,25]
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm0, %zmm10
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm10, %zmm11, %zmm10
-; AVX512F-NEXT:    vmovdqa32 %zmm15, %zmm10 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm6
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm10
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
 ; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm6, %zmm11
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [5,12,19,26]
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm0, %zmm6
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm6, %zmm12, %zmm6
-; AVX512F-NEXT:    vmovdqa32 %zmm11, %zmm6 {%k1}
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm8, %zmm4
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
-; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm4, %zmm5
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,13,20,27]
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm0, %zmm1
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm13, %zmm0
-; AVX512F-NEXT:    vmovdqa32 %zmm5, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm3, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm14, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, (%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, (%r10)
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm10, %zmm11
+; AVX512F-NEXT:    vmovdqa32 %zmm11, %zmm9 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = <19,26,1,8,15,u,u,u>
+; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm10
+; AVX512F-NEXT:    vmovdqa32 %zmm11, %zmm10 {%k2}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm7, %zmm6, %zmm11
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm11, %zmm12
+; AVX512F-NEXT:    vmovdqa32 %zmm12, %zmm10 {%k1}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm7, %zmm6, %zmm11
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm11, %zmm12
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,11,18,25]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm13
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm13, %zmm11, %zmm11
+; AVX512F-NEXT:    vmovdqa32 %zmm12, %zmm11 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm12
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
+; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm12, %zmm13
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm5, %zmm4, %zmm12
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm14 = [5,12,19,26]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm14
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm14, %zmm12, %zmm12
+; AVX512F-NEXT:    vmovdqa32 %zmm13, %zmm12 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm13
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm13, %zmm6
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
+; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [6,13,20,27]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm4
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm2, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, (%r10)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -2556,115 +2557,118 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm7
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm7
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm7, %zmm6
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,7,14,21,28,u,u,u>
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm5
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,7,14,21,28,u,u,u>
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    movw $992, %di # imm = 0x3E0
 ; AVX512BW-NEXT:    kmovd %edi, %k1
-; AVX512BW-NEXT:    vmovdqa32 %zmm6, %zmm3 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm6
+; AVX512BW-NEXT:    vmovdqa32 %zmm8, %zmm2 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm8
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
 ; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm6, %zmm9
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm8, %zmm9
 ; AVX512BW-NEXT:    movb $-32, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm7, %zmm8, %zmm10
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
-; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm7, %zmm8, %zmm11
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
-; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm7, %zmm8, %zmm12
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
-; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm7, %zmm13
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm6, %zmm8
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm14 = <1,8,15,22,29,u,u,u>
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm0, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm8 = <1,8,15,22,29,u,u,u>
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
 ; AVX512BW-NEXT:    movw $480, %di # imm = 0x1E0
 ; AVX512BW-NEXT:    kmovd %edi, %k2
-; AVX512BW-NEXT:    vmovdqa32 %zmm8, %zmm14 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm8
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm8, %zmm15
+; AVX512BW-NEXT:    vmovdqa32 %zmm9, %zmm8 {%k2}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm9
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm9, %zmm10
 ; AVX512BW-NEXT:    movw $-512, %di # imm = 0xFE00
 ; AVX512BW-NEXT:    kmovd %edi, %k1
-; AVX512BW-NEXT:    vmovdqa32 %zmm15, %zmm14 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm8, %zmm9
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <18,25,0,7,14,u,u,u>
-; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm2, %zmm7
-; AVX512BW-NEXT:    vmovdqa32 %zmm9, %zmm7 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
-; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm9
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm9, %zmm15
-; AVX512BW-NEXT:    vmovdqa32 %zmm15, %zmm7 {%k1}
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm9 = <19,26,1,8,15,u,u,u>
-; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm2, %zmm9
+; AVX512BW-NEXT:    vmovdqa32 %zmm10, %zmm8 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm9 = <18,25,0,7,14,u,u,u>
+; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm9
 ; AVX512BW-NEXT:    vmovdqa32 %zmm10, %zmm9 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
 ; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm10
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm10, %zmm15
-; AVX512BW-NEXT:    vmovdqa32 %zmm15, %zmm9 {%k1}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm10
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm10, %zmm15
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11,18,25]
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm0, %zmm10
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm10, %zmm11, %zmm10
-; AVX512BW-NEXT:    vmovdqa32 %zmm15, %zmm10 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm6
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm10
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
 ; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm6, %zmm11
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [5,12,19,26]
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm0, %zmm6
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm6, %zmm12, %zmm6
-; AVX512BW-NEXT:    vmovdqa32 %zmm11, %zmm6 {%k1}
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm8, %zmm4
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
-; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm4, %zmm5
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,13,20,27]
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm0, %zmm1
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm13, %zmm0
-; AVX512BW-NEXT:    vmovdqa32 %zmm5, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%r10)
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm10, %zmm11
+; AVX512BW-NEXT:    vmovdqa32 %zmm11, %zmm9 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm11
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm10 = <19,26,1,8,15,u,u,u>
+; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm10
+; AVX512BW-NEXT:    vmovdqa32 %zmm11, %zmm10 {%k2}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm7, %zmm6, %zmm11
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm11, %zmm12
+; AVX512BW-NEXT:    vmovdqa32 %zmm12, %zmm10 {%k1}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm7, %zmm6, %zmm11
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm11, %zmm12
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm11
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,11,18,25]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm13
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm13, %zmm11, %zmm11
+; AVX512BW-NEXT:    vmovdqa32 %zmm12, %zmm11 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm12
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
+; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm12, %zmm13
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm12
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = [5,12,19,26]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm14
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm14, %zmm12, %zmm12
+; AVX512BW-NEXT:    vmovdqa32 %zmm13, %zmm12 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm13
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm13, %zmm6
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [6,13,20,27]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%r10)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -5570,99 +5574,101 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa32 %zmm20, %zmm17 {%k1}
 ; AVX512F-NEXT:    vpermi2d %zmm6, %zmm3, %zmm19
 ; AVX512F-NEXT:    vpermt2d %zmm5, %zmm21, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm20
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm18, %zmm20
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm4, %zmm18
 ; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm16
-; AVX512F-NEXT:    vmovdqa32 %zmm20, %zmm16 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm18, %zmm16 {%k2}
 ; AVX512F-NEXT:    vmovdqa32 %zmm19, %zmm16 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
-; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm22
-; AVX512F-NEXT:    vpermt2d %zmm12, %zmm21, %zmm22
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <18,25,0,7,14,u,u,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm20
-; AVX512F-NEXT:    vpermt2d %zmm10, %zmm19, %zmm20
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm21
+; AVX512F-NEXT:    vpermt2d %zmm12, %zmm20, %zmm21
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <18,25,0,7,14,u,u,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm19
+; AVX512F-NEXT:    vpermt2d %zmm10, %zmm18, %zmm19
+; AVX512F-NEXT:    vmovdqa32 %zmm21, %zmm19 {%k2}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
+; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm22
+; AVX512F-NEXT:    vpermt2d %zmm15, %zmm21, %zmm22
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
+; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm23, %zmm22
+; AVX512F-NEXT:    vmovdqa32 %zmm22, %zmm19 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm3, %zmm21
+; AVX512F-NEXT:    vpermt2d %zmm5, %zmm23, %zmm21
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm4, %zmm20
+; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm18
+; AVX512F-NEXT:    vmovdqa32 %zmm20, %zmm18 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm21, %zmm18 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
+; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm23
+; AVX512F-NEXT:    vpermt2d %zmm12, %zmm22, %zmm23
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <19,26,1,8,15,u,u,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm21
+; AVX512F-NEXT:    vpermt2d %zmm10, %zmm20, %zmm21
+; AVX512F-NEXT:    vmovdqa32 %zmm23, %zmm21 {%k2}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm24
+; AVX512F-NEXT:    vpermt2d %zmm9, %zmm23, %zmm24
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
+; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm25, %zmm24
+; AVX512F-NEXT:    vmovdqa32 %zmm24, %zmm21 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm6, %zmm23
+; AVX512F-NEXT:    vpermt2d %zmm5, %zmm25, %zmm23
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm4, %zmm22
+; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm20
 ; AVX512F-NEXT:    vmovdqa32 %zmm22, %zmm20 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
-; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm23
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm22, %zmm23
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
-; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm13, %zmm24, %zmm23
 ; AVX512F-NEXT:    vmovdqa32 %zmm23, %zmm20 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm6, %zmm3, %zmm22
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm24, %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm23
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm21, %zmm23
-; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm19
-; AVX512F-NEXT:    vmovdqa32 %zmm23, %zmm19 {%k2}
-; AVX512F-NEXT:    vmovdqa32 %zmm22, %zmm19 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
-; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm25
-; AVX512F-NEXT:    vpermt2d %zmm12, %zmm24, %zmm25
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm22 = <19,26,1,8,15,u,u,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm23
-; AVX512F-NEXT:    vpermt2d %zmm10, %zmm22, %zmm23
-; AVX512F-NEXT:    vmovdqa32 %zmm25, %zmm23 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
+; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm23
+; AVX512F-NEXT:    vpermt2d %zmm9, %zmm24, %zmm23
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
 ; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm26
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm25, %zmm26
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
-; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm13, %zmm27, %zmm26
-; AVX512F-NEXT:    vmovdqa32 %zmm26, %zmm23 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm3, %zmm6, %zmm25
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm27, %zmm25
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm4, %zmm24
-; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm22
-; AVX512F-NEXT:    vmovdqa32 %zmm24, %zmm22 {%k2}
-; AVX512F-NEXT:    vmovdqa32 %zmm25, %zmm22 {%k1}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm25, %zmm23
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
+; AVX512F-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm22
+; AVX512F-NEXT:    vpermt2d %zmm12, %zmm26, %zmm22
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm27 = [4,11,18,25]
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm28
+; AVX512F-NEXT:    vpermt2d %zmm11, %zmm27, %zmm28
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm28, %zmm22, %zmm22
+; AVX512F-NEXT:    vmovdqa32 %zmm23, %zmm22 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm6, %zmm24
+; AVX512F-NEXT:    vpermt2d %zmm5, %zmm25, %zmm24
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm4, %zmm26
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm27
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm27, %zmm26, %zmm23
+; AVX512F-NEXT:    vmovdqa32 %zmm24, %zmm23 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512F-NEXT:    vpermt2d %zmm15, %zmm24, %zmm25
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
 ; AVX512F-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm25
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm26, %zmm25
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm26, %zmm25
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
 ; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm13, %zmm27, %zmm25
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
-; AVX512F-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm24
-; AVX512F-NEXT:    vpermt2d %zmm12, %zmm28, %zmm24
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [4,11,18,25]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm28
+; AVX512F-NEXT:    vpermt2d %zmm12, %zmm27, %zmm28
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [5,12,19,26]
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm30
 ; AVX512F-NEXT:    vpermt2d %zmm11, %zmm29, %zmm30
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm30, %zmm24, %zmm24
-; AVX512F-NEXT:    vmovdqa32 %zmm25, %zmm24 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm3, %zmm6, %zmm26
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm27, %zmm26
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm4, %zmm28
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm30, %zmm28, %zmm28
+; AVX512F-NEXT:    vmovdqa32 %zmm25, %zmm28 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm3, %zmm24
+; AVX512F-NEXT:    vpermt2d %zmm5, %zmm26, %zmm24
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm4, %zmm27
 ; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm29
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm29, %zmm28, %zmm25
-; AVX512F-NEXT:    vmovdqa32 %zmm26, %zmm25 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm26
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm18, %zmm26
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
-; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm13, %zmm27, %zmm26
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
-; AVX512F-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm29
-; AVX512F-NEXT:    vpermt2d %zmm12, %zmm28, %zmm29
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm30 = [5,12,19,26]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm31
-; AVX512F-NEXT:    vpermt2d %zmm11, %zmm30, %zmm31
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm31, %zmm29, %zmm29
-; AVX512F-NEXT:    vmovdqa32 %zmm26, %zmm29 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm6, %zmm3, %zmm18
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm27, %zmm18
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm4, %zmm28
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm30
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm30, %zmm28, %zmm26
-; AVX512F-NEXT:    vmovdqa32 %zmm18, %zmm26 {%k1}
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm21, %zmm9
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm29, %zmm27, %zmm25
+; AVX512F-NEXT:    vmovdqa32 %zmm24, %zmm25 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm15, %zmm24, %zmm9
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
 ; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermt2d %zmm13, %zmm15, %zmm9
@@ -5673,7 +5679,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2d %zmm11, %zmm14, %zmm10
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm10, %zmm12, %zmm10
 ; AVX512F-NEXT:    vmovdqa32 %zmm9, %zmm10 {%k1}
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm21, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm6, %zmm24, %zmm3
 ; AVX512F-NEXT:    vpermt2d %zmm5, %zmm15, %zmm3
 ; AVX512F-NEXT:    vpermt2d %zmm4, %zmm13, %zmm2
 ; AVX512F-NEXT:    vpermt2d %zmm1, %zmm14, %zmm0
@@ -5683,14 +5689,14 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm16, 64(%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm17, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, 64(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm23, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm25, 64(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm24, (%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm26, 64(%r10)
-; AVX512F-NEXT:    vmovdqa64 %zmm29, (%r10)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm21, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, 64(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm25, 64(%r10)
+; AVX512F-NEXT:    vmovdqa64 %zmm28, (%r10)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rax)
 ; AVX512F-NEXT:    vzeroupper
@@ -5762,99 +5768,101 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqa32 %zmm20, %zmm17 {%k1}
 ; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm3, %zmm19
 ; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm21, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm20
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm18, %zmm20
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm4, %zmm18
 ; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm16
-; AVX512BW-NEXT:    vmovdqa32 %zmm20, %zmm16 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm18, %zmm16 {%k2}
 ; AVX512BW-NEXT:    vmovdqa32 %zmm19, %zmm16 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
-; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm22
-; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm21, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <18,25,0,7,14,u,u,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm20
-; AVX512BW-NEXT:    vpermt2d %zmm10, %zmm19, %zmm20
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm21
+; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm20, %zmm21
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <18,25,0,7,14,u,u,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm19
+; AVX512BW-NEXT:    vpermt2d %zmm10, %zmm18, %zmm19
+; AVX512BW-NEXT:    vmovdqa32 %zmm21, %zmm19 {%k2}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
+; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm22
+; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm21, %zmm22
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
+; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm23, %zmm22
+; AVX512BW-NEXT:    vmovdqa32 %zmm22, %zmm19 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm3, %zmm21
+; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm23, %zmm21
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm4, %zmm20
+; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm18
+; AVX512BW-NEXT:    vmovdqa32 %zmm20, %zmm18 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm21, %zmm18 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm23
+; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm22, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <19,26,1,8,15,u,u,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm21
+; AVX512BW-NEXT:    vpermt2d %zmm10, %zmm20, %zmm21
+; AVX512BW-NEXT:    vmovdqa32 %zmm23, %zmm21 {%k2}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm24
+; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm23, %zmm24
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
+; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm25, %zmm24
+; AVX512BW-NEXT:    vmovdqa32 %zmm24, %zmm21 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm6, %zmm23
+; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm25, %zmm23
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm4, %zmm22
+; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm20
 ; AVX512BW-NEXT:    vmovdqa32 %zmm22, %zmm20 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
-; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm23
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm22, %zmm23
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
-; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm24, %zmm23
 ; AVX512BW-NEXT:    vmovdqa32 %zmm23, %zmm20 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm3, %zmm22
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm24, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm23
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm21, %zmm23
-; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm19
-; AVX512BW-NEXT:    vmovdqa32 %zmm23, %zmm19 {%k2}
-; AVX512BW-NEXT:    vmovdqa32 %zmm22, %zmm19 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
-; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm25
-; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm24, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm22 = <19,26,1,8,15,u,u,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm23
-; AVX512BW-NEXT:    vpermt2d %zmm10, %zmm22, %zmm23
-; AVX512BW-NEXT:    vmovdqa32 %zmm25, %zmm23 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
+; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm23
+; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm24, %zmm23
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
 ; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm26
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm25, %zmm26
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
-; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm27, %zmm26
-; AVX512BW-NEXT:    vmovdqa32 %zmm26, %zmm23 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm6, %zmm25
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm27, %zmm25
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm4, %zmm24
-; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm22
-; AVX512BW-NEXT:    vmovdqa32 %zmm24, %zmm22 {%k2}
-; AVX512BW-NEXT:    vmovdqa32 %zmm25, %zmm22 {%k1}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm25, %zmm23
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
+; AVX512BW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm22
+; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm26, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm27 = [4,11,18,25]
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm28
+; AVX512BW-NEXT:    vpermt2d %zmm11, %zmm27, %zmm28
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm28, %zmm22, %zmm22
+; AVX512BW-NEXT:    vmovdqa32 %zmm23, %zmm22 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm6, %zmm24
+; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm25, %zmm24
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm4, %zmm26
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm27
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm27, %zmm26, %zmm23
+; AVX512BW-NEXT:    vmovdqa32 %zmm24, %zmm23 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm24, %zmm25
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
 ; AVX512BW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm25
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm26, %zmm25
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm26, %zmm25
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
 ; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm27, %zmm25
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
-; AVX512BW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm24
-; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm28, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [4,11,18,25]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm28
+; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm27, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [5,12,19,26]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm30
 ; AVX512BW-NEXT:    vpermt2d %zmm11, %zmm29, %zmm30
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm30, %zmm24, %zmm24
-; AVX512BW-NEXT:    vmovdqa32 %zmm25, %zmm24 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm6, %zmm26
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm27, %zmm26
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm4, %zmm28
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm30, %zmm28, %zmm28
+; AVX512BW-NEXT:    vmovdqa32 %zmm25, %zmm28 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm3, %zmm24
+; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm26, %zmm24
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm4, %zmm27
 ; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm29
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm29, %zmm28, %zmm25
-; AVX512BW-NEXT:    vmovdqa32 %zmm26, %zmm25 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm26
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm18, %zmm26
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
-; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm27, %zmm26
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
-; AVX512BW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm29
-; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm28, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm30 = [5,12,19,26]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm31
-; AVX512BW-NEXT:    vpermt2d %zmm11, %zmm30, %zmm31
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm31, %zmm29, %zmm29
-; AVX512BW-NEXT:    vmovdqa32 %zmm26, %zmm29 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm3, %zmm18
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm27, %zmm18
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm4, %zmm28
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm30
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm30, %zmm28, %zmm26
-; AVX512BW-NEXT:    vmovdqa32 %zmm18, %zmm26 {%k1}
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm21, %zmm9
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm29, %zmm27, %zmm25
+; AVX512BW-NEXT:    vmovdqa32 %zmm24, %zmm25 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm24, %zmm9
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
 ; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm15, %zmm9
@@ -5865,7 +5873,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2d %zmm11, %zmm14, %zmm10
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm10, %zmm12, %zmm10
 ; AVX512BW-NEXT:    vmovdqa32 %zmm9, %zmm10 {%k1}
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm21, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm24, %zmm3
 ; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm15, %zmm3
 ; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm13, %zmm2
 ; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm14, %zmm0
@@ -5875,14 +5883,14 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm16, 64(%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm17, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 64(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, 64(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, (%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, 64(%r10)
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, (%r10)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, 64(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, 64(%r10)
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, (%r10)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
@@ -11710,306 +11718,318 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-LABEL: load_i32_stride7_vf64:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm12
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm13
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm14
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
-; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm1, %zmm3
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm8, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm20
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm14
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm19
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm6, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm6, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm6, %zmm9
+; AVX512F-NEXT:    vpermi2d %zmm20, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm11
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm8, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm12
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm8, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm13
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm8, %zmm13
+; AVX512F-NEXT:    vpermi2d %zmm20, %zmm2, %zmm8
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
+; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm16
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm15, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm17
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm15, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm15, %zmm18
+; AVX512F-NEXT:    vpermi2d %zmm20, %zmm2, %zmm15
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm22
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm21, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm23
+; AVX512F-NEXT:    vpermt2d %zmm4, %zmm21, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24
+; AVX512F-NEXT:    vpermt2d %zmm5, %zmm21, %zmm24
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm20, %zmm21
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
+; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm26
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm25, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm27
+; AVX512F-NEXT:    vpermt2d %zmm4, %zmm25, %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm28
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm5, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm29
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm20, %zmm25
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm30, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm31
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm30, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm30, %zmm4
+; AVX512F-NEXT:    vpermi2d %zmm20, %zmm6, %zmm30
+; AVX512F-NEXT:    vpermt2d %zmm20, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm2, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm19
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm17, %zmm11, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm14, %zmm2, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm14, %zmm2, %zmm11
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm12
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm13
+; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm14, %zmm2, %zmm16
+; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm17
+; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm18
+; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm14, %zmm2, %zmm22
+; AVX512F-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm23
+; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm24
+; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm21
+; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm14, %zmm2, %zmm26
+; AVX512F-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm28
+; AVX512F-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm29
+; AVX512F-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm25
+; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm14, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm31
+; AVX512F-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm30
+; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm27
+; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm14, %zmm2, %zmm19
+; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
+; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm30, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512F-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm8, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm17, %zmm11, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm31, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm1, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm1, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm8, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm17, %zmm11, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm6, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm12, %zmm0, %zmm3
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
+; AVX512F-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm13, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm14, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm11, %zmm17, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm29, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm12, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm1, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm13, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm30, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm14, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm31, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm11, %zmm17, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm15
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
-; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm30, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm16, %zmm30, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm30, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm10
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm30, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm30, %zmm21
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm10, %zmm21
-; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm8, %zmm30, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm10, %zmm21
-; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm30, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm10, %zmm21
-; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm17, %zmm11, %zmm30
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm10, %zmm30
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm8, %zmm10, %zmm13
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm8, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm10, %zmm14
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm8, %zmm14
-; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm10, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm8, %zmm11
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm10, %zmm12
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm8, %zmm12
-; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm10, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm16, %zmm10, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm10, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm10, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
-; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm25, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
-; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm16, %zmm27, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm6, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
-; AVX512F-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm16, %zmm28, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm29, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
-; AVX512F-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm16, %zmm31, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm1, %zmm23
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm30, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm2, %zmm16
-; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm16
+; AVX512F-NEXT:    vpermi2d %zmm2, %zmm16, %zmm30
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm31, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm2, %zmm31
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm27, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm3, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm28, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm31, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm18
-; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm25, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm19, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm27, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm19, %zmm1, %zmm27
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm28, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm19, %zmm1, %zmm28
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm31, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm19, %zmm1, %zmm31
-; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm19
-; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm15
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm2, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm6, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm29, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm2, %zmm29
+; AVX512F-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm1, %zmm16
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm1, %zmm25
 ; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm17
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm18
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,7,14,21,28,u,u,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm24
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm24
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,8,15,22,29,u,u,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm23
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm3, %zmm23
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = <18,25,0,7,14,u,u,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm4, %zmm24
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm3, %zmm26
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = <18,25,0,7,14,u,u,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm27
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm6, %zmm27
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = <19,26,1,8,15,u,u,u>
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm29
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm7, %zmm29
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [4,11,18,25]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm18, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [5,12,19,26]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm7, %zmm29
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [4,11,18,25]
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm1
 ; AVX512F-NEXT:    vpermt2d %zmm0, %zmm19, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm20 = [6,13,20,27]
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm20, %zmm17
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm20 = [5,12,19,26]
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm20, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm21 = [6,13,20,27]
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm21, %zmm18
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm4
 ; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm13
 ; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm14
 ; AVX512F-NEXT:    vpermt2d %zmm0, %zmm3, %zmm14
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm4, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm7, %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm18, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm4, %zmm6, %zmm15
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm17
+; AVX512F-NEXT:    vpermt2d %zmm4, %zmm7, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
 ; AVX512F-NEXT:    vpermt2d %zmm0, %zmm19, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm20, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm20, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm21, %zmm4
 ; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm8
 ; AVX512F-NEXT:    vpermt2d %zmm9, %zmm2, %zmm8
 ; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm1
 ; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm10
 ; AVX512F-NEXT:    vpermt2d %zmm9, %zmm3, %zmm10
 ; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm3
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm4, %zmm11
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm5, %zmm6, %zmm11
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm6
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm12
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm7, %zmm12
+; AVX512F-NEXT:    vpermt2d %zmm5, %zmm7, %zmm12
 ; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm18, %zmm21
-; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm26
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm19, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm22
+; AVX512F-NEXT:    vpermt2d %zmm9, %zmm19, %zmm22
 ; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm19
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm20, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm20, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm28
+; AVX512F-NEXT:    vpermt2d %zmm9, %zmm20, %zmm28
+; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm20
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm21, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm9, %zmm21, %zmm5
 ; AVX512F-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512F-NEXT:    kmovw %eax, %k1
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -12019,14 +12039,14 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm22 {%k1}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm24 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm8 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
-; AVX512F-NEXT:    vmovdqa32 %zmm25, %zmm2 {%k1}
+; AVX512F-NEXT:    vmovdqa32 %zmm30, %zmm2 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
 ; AVX512F-NEXT:    movw $480, %ax # imm = 0x1E0
@@ -12038,15 +12058,14 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm10 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm23 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm26 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm23 {%k1}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm26 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm14 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm14 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm31, %zmm3 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -12054,17 +12073,18 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm11 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm24 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm27 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm24 {%k1}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm27 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm15 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm15 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm4 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm6 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm6 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -12073,88 +12093,92 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm29 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm29 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm17 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm16 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm17 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm16 {%k1}
-; AVX512F-NEXT:    vmovdqa32 %zmm27, %zmm7 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm7 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm0, %zmm7 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm21, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm22, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm9, %zmm0 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm20, %zmm9 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm25, %zmm20 {%k1}
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm18, %zmm28, %zmm18
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm25, %zmm18 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm26, %zmm25, %zmm25
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm26, %zmm25 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm22, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm22 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm30, %zmm22 {%k1}
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm19, %zmm21, %zmm19
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm30, %zmm19 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm27, %zmm26 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm28, %zmm27, %zmm28
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm30, %zmm28 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm28, %zmm27 {%k1}
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm19, %zmm31, %zmm19
-; AVX512F-NEXT:    vmovdqa32 %zmm30, %zmm19 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm17, %zmm28, %zmm17
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm28, %zmm17 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm28, %zmm5
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm28, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm28, %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm28, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm6, %zmm28, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm28, %zmm6 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm30 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm31, %zmm30 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm31 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm27, %zmm31 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm20, %zmm27, %zmm20
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm29, %zmm20 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm18, %zmm27, %zmm18
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm29, %zmm18 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm23, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm21, %zmm4 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm16, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm16, %zmm1 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm25, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm16, %zmm5 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, 192(%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm8, 128(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, 64(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm13, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, 192(%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm14, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm23, 64(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm26, 64(%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, 128(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 192(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 192(%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm15, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm24, 64(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 64(%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm11, 128(%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, 192(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm16, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm29, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, (%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 64(%r8)
 ; AVX512F-NEXT:    vmovdqa64 %zmm12, 128(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 192(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, 192(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, (%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, 64(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, 128(%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm19, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm27, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm26, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm25, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm31, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm30, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm28, 128(%rax)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 128(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rax)
 ; AVX512F-NEXT:    addq $3400, %rsp # imm = 0xD48
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -12162,306 +12186,318 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-LABEL: load_i32_stride7_vf64:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm12
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm13
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm14
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
-; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm1, %zmm3
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm20
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm14
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm19
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [20,27,6,13,20,27,6,13,20,27,6,13,20,27,6,13]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm6, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm6, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm6, %zmm9
+; AVX512BW-NEXT:    vpermi2d %zmm20, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm11
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm8, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm12
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm8, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm13
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm8, %zmm13
+; AVX512BW-NEXT:    vpermi2d %zmm20, %zmm2, %zmm8
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
+; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm16
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm15, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm17
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm15, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm15, %zmm18
+; AVX512BW-NEXT:    vpermi2d %zmm20, %zmm2, %zmm15
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
+; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm22
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm21, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm23
+; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm21, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24
+; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm21, %zmm24
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm20, %zmm21
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
+; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm26
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm25, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm27
+; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm25, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm29
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm20, %zmm25
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm30, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm31
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm30, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm30, %zmm4
+; AVX512BW-NEXT:    vpermi2d %zmm20, %zmm6, %zmm30
+; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm2, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,0,10,11,12,13,18,25,0,0,10,11,12,13,18,25]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm17, %zmm11, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm2, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm2, %zmm11
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm13
+; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm2, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm17
+; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm15
+; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm2, %zmm22
+; AVX512BW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm23
+; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm24
+; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm2, %zmm26
+; AVX512BW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm28
+; AVX512BW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm29
+; AVX512BW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm25
+; AVX512BW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm31
+; AVX512BW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm30
+; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm27
+; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm2, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
+; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm30, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,7,14,21,28,0,0,0,0,7,14,21,28,0,0]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
+; AVX512BW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,19,26,0,9,10,11,12,13,19,26]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm17, %zmm11, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm31, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,8,15,22,29,0,0,0,1,8,15,22,29,0,0]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,20,27,0,9,10,11,12,13,20,27]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm17, %zmm11, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm6, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,18,25,0,7,14,0,0,0,18,25,0,7,14,0,0]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm0, %zmm3
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
+; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,21,28,0,9,10,11,12,13,21,28]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm11, %zmm17, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm29, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,19,26,1,8,15,0,0,0,19,26,1,8,15,0,0]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,9,10,11,12,13,22,29,0,9,10,11,12,13,22,29]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm0, %zmm3
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm1, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm30, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm31, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm11, %zmm17, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm15
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [25,4,11,18,25,4,11,18,25,4,11,18,25,4,11,18]
-; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm30, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm30, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm30, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm10
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm30, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm30, %zmm21
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,9,10,11,12,16,23,30,0,9,10,11,12,16,23,30]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm10, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm30, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm10, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm30, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm10, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm17, %zmm11, %zmm30
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm10, %zmm30
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [26,5,12,19,26,5,12,19,26,5,12,19,26,5,12,19]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm10, %zmm13
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,9,10,11,12,17,24,31,0,9,10,11,12,17,24,31]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm8, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm10, %zmm14
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm8, %zmm14
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm10, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm8, %zmm11
-; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm10, %zmm12
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm8, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm10, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm10, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm2, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm10, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm10, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [8,15,0,0,0,19,26,1,8,15,0,0,0,19,26,1]
-; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm25, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [27,6,13,20,27,6,13,20,27,6,13,20,27,6,13,20]
-; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm27, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm6, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [28,0,0,0,0,7,14,21,28,0,0,0,0,7,14,21]
-; AVX512BW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm28, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm29, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [29,0,0,0,1,8,15,22,29,0,0,0,1,8,15,22]
-; AVX512BW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm31, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm1, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm25
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm30, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [14,0,0,0,18,25,0,7,14,0,0,0,18,25,0,7]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm16
-; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm25, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm27, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm16
+; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm16, %zmm30
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm31, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm2, %zmm31
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm2, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm28, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm3, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm31, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm18
-; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm25, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm19, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm27, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm19, %zmm1, %zmm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm28, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm19, %zmm1, %zmm28
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm31, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm19, %zmm1, %zmm31
-; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm15
-; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm2, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm6, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm29, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm2, %zmm29
+; AVX512BW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm1, %zmm16
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm1, %zmm25
 ; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm17
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm18
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,7,14,21,28,u,u,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm24
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm24
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,8,15,22,29,u,u,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm23
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm3, %zmm23
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm4 = <18,25,0,7,14,u,u,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm4, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm3, %zmm26
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = <18,25,0,7,14,u,u,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm27
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm6, %zmm27
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <19,26,1,8,15,u,u,u>
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm29
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm7, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [4,11,18,25]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [5,12,19,26]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm7, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [4,11,18,25]
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1
 ; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm19, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm20 = [6,13,20,27]
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm20, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm20 = [5,12,19,26]
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm20, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm21 = [6,13,20,27]
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm21, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm13
 ; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm14
 ; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm3, %zmm14
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm4, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm7, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm6, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm17
+; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm7, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
 ; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm19, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm20, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm20, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm21, %zmm4
 ; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
 ; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm8
 ; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm1
 ; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm10
 ; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm3, %zmm10
 ; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm3
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm4, %zmm11
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm11
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm6
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm12
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm7, %zmm12
+; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm7, %zmm12
 ; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm18, %zmm21
-; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm26
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm19, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm22
+; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm19, %zmm22
 ; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm19
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm20, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm20, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm28
+; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm20, %zmm28
+; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm20
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm21, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm21, %zmm5
 ; AVX512BW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -12471,14 +12507,14 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm22 {%k1}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm24 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm8 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
-; AVX512BW-NEXT:    vmovdqa32 %zmm25, %zmm2 {%k1}
+; AVX512BW-NEXT:    vmovdqa32 %zmm30, %zmm2 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
 ; AVX512BW-NEXT:    movw $480, %ax # imm = 0x1E0
@@ -12490,15 +12526,14 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm10 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm23 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm26 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm23 {%k1}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm26 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm14 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm14 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm31, %zmm3 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -12506,17 +12541,18 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm11 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm24 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm27 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm24 {%k1}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm27 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm15 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm15 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm4 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm6 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm6 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm12 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -12525,88 +12561,92 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm29 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm29 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm16 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm17 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm16 {%k1}
-; AVX512BW-NEXT:    vmovdqa32 %zmm27, %zmm7 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm17 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm7 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm0, %zmm7 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm21, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm22, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm9, %zmm0 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm20, %zmm9 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm25, %zmm20 {%k1}
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm18, %zmm28, %zmm18
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm25, %zmm18 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm26, %zmm25, %zmm25
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm26, %zmm25 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm22, %zmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm22 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm30, %zmm22 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm19, %zmm21, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm30, %zmm19 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm27, %zmm26 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm28, %zmm27, %zmm28
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm30, %zmm28 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm28, %zmm27 {%k1}
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm19, %zmm31, %zmm19
-; AVX512BW-NEXT:    vmovdqa32 %zmm30, %zmm19 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm17, %zmm28, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm28, %zmm17 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm5, %zmm28, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm28, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm28, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm28, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm6, %zmm28, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm28, %zmm6 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm30 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm31, %zmm30 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm31 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm27, %zmm31 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm20, %zmm27, %zmm20
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm29, %zmm20 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm18, %zmm27, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm29, %zmm18 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm23, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm21, %zmm4 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm16, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm16, %zmm1 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm5, %zmm25, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm16, %zmm5 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, 192(%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm8, 128(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 64(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, 192(%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, 64(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, 64(%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, 128(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 192(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 192(%rcx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm15, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, 64(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 64(%rcx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm11, 128(%rcx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm7, 192(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, (%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 64(%r8)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm12, 128(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, 192(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, 192(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, (%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, 64(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, 128(%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, 128(%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%rax)
 ; AVX512BW-NEXT:    addq $3400, %rsp # imm = 0xD48
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
index 1bd7025307d2b..56d04a5f55c23 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
@@ -160,8 +160,7 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-FAST-NEXT:    vmovdqa (%rdi), %ymm4
 ; AVX512-FAST-NEXT:    vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
 ; AVX512-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,5,5,13,5,5]
-; AVX512-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [5,13,5,13,5,13,5,13]
 ; AVX512-FAST-NEXT:    vpermi2d %ymm1, %ymm4, %ymm6
 ; AVX512-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
 ; AVX512-FAST-NEXT:    vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
@@ -1618,123 +1617,133 @@ define void @load_i32_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r11
 ; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm7
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
 ; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm8, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm8, %zmm10
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm8
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm9
 ; AVX512F-NEXT:    movb $-64, %dil
 ; AVX512F-NEXT:    kmovw %edi, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [0,8,16,24,0,8,16,24]
-; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm3, %zmm9
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [0,8,16,24,0,8,16,24]
+; AVX512F-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm2, %zmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,8,16,24]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm9, %zmm16
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
 ; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm10
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm9, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [1,9,17,25,1,9,17,25]
-; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm3, %zmm10
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm9
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm11, %zmm9
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm9
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm2, %zmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm11 = [1,9,17,25]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm11
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
 ; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm10, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm10, %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [2,10,18,26,2,10,18,26]
-; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm3, %zmm11
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm10
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm12, %zmm10
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm10
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [2,10,18,26,2,10,18,26]
+; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm2, %zmm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = [2,10,18,26]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm12
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
 ; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm12
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm11, %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm13
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm11, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [3,11,19,27,3,11,19,27]
-; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm3, %zmm12
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm11
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [3,11,19,27,3,11,19,27]
+; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm2, %zmm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = [3,11,19,27]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm12, %zmm11
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
 ; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm13
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm12, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [4,12,20,28,4,12,20,28]
-; AVX512F-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm3, %zmm13
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm12
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm14, %zmm12
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm12
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [4,12,20,28,4,12,20,28]
+; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm2, %zmm12
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,12,20,28]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm14
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm13, %zmm12
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
 ; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm13, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,21,29,5,13,21,29]
-; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm3, %zmm14
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm13
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm13
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm2, %zmm13
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm15 = [5,13,21,29]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm15
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm14, %zmm13
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
 ; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm14, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm14, %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [6,14,22,30,6,14,22,30]
-; AVX512F-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm3, %zmm15
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm14
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm14
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
 ; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm15, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm15, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [7,15,23,31,7,15,23,31]
-; AVX512F-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm4, %zmm3, %zmm5
-; AVX512F-NEXT:    vpermt2d %zmm1, %zmm15, %zmm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rsi)
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm15
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,22,30,6,14,22,30]
+; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm2, %zmm14
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14,22,30]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm7, %zmm14
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2d %zmm4, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm6 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [7,15,23,31,7,15,23,31]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,15,23,31]
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm16, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm11, (%r8)
 ; AVX512F-NEXT:    vmovdqa64 %zmm12, (%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm13, (%r11)
-; AVX512F-NEXT:    vmovdqa64 %zmm14, (%r10)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, (%r10)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -1746,123 +1755,133 @@ define void @load_i32_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm7
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm8, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm8, %zmm10
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm9
 ; AVX512BW-NEXT:    movb $-64, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [0,8,16,24,0,8,16,24]
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm3, %zmm9
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [0,8,16,24,0,8,16,24]
+; AVX512BW-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,8,16,24]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm9, %zmm16
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
 ; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm9, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [1,9,17,25,1,9,17,25]
-; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm3, %zmm10
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm11, %zmm9
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [1,9,17,25]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm11
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
 ; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm10, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm10, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [2,10,18,26,2,10,18,26]
-; AVX512BW-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm3, %zmm11
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm10
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm12, %zmm10
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm10
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [2,10,18,26,2,10,18,26]
+; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [2,10,18,26]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm12
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
 ; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm12
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm11, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm13
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm11, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [3,11,19,27,3,11,19,27]
-; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm3, %zmm12
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm11
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [3,11,19,27,3,11,19,27]
+; AVX512BW-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm11
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [3,11,19,27]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm12, %zmm11
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
 ; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm13
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm12, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [4,12,20,28,4,12,20,28]
-; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm3, %zmm13
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm12
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm14, %zmm12
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm12
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [4,12,20,28,4,12,20,28]
+; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm12
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,12,20,28]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm14
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm13, %zmm12
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
 ; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm13, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,21,29,5,13,21,29]
-; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm3, %zmm14
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm13
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm13
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [5,13,21,29,5,13,21,29]
+; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm15 = [5,13,21,29]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm15
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm14, %zmm13
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
 ; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm14, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm14, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [6,14,22,30,6,14,22,30]
-; AVX512BW-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm3, %zmm15
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm14
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm14
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
 ; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm15, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm15, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [7,15,23,31,7,15,23,31]
-; AVX512BW-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm3, %zmm5
-; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm15, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rsi)
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,22,30,6,14,22,30]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm14
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14,22,30]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm7, %zmm14
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2d %zmm4, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm6 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,15,23,31]
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rcx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%r8)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%r11)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%r10)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%r10)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -3689,563 +3708,571 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-LABEL: load_i32_stride8_vf32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512F-NEXT:    movb $-64, %al
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512F-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512F-NEXT:    subq $1288, %rsp # imm = 0x508
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm12
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm17
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm28
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm23
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm20
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm8, %zmm2
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm7, %zmm0
+; AVX512F-NEXT:    vpermi2d %zmm12, %zmm17, %zmm8
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm14
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm1, %zmm14
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm9
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm21, %zmm9
+; AVX512F-NEXT:    vpermi2d %zmm12, %zmm17, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512F-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm19, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm12, %zmm17, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512F-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm26
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm18, %zmm26
+; AVX512F-NEXT:    vpermi2d %zmm12, %zmm17, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm25
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm22, %zmm25
+; AVX512F-NEXT:    vpermi2d %zmm12, %zmm17, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm30, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512F-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm29, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
 ; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm1, %zmm20
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm23
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm27, %zmm23
+; AVX512F-NEXT:    vpermi2d %zmm12, %zmm17, %zmm30
+; AVX512F-NEXT:    vpermi2d %zmm12, %zmm17, %zmm29
+; AVX512F-NEXT:    vpermt2d %zmm12, %zmm1, %zmm17
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512F-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm24
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm31, %zmm24
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm1, %zmm28
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm15
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm15, %zmm7
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm15, %zmm21
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm15, %zmm19
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm15, %zmm18
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm15, %zmm22
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm15, %zmm27
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm15, %zmm31
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm1, %zmm15
+; AVX512F-NEXT:    movb $-64, %al
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm11
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm3
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,8,16,24,0,8,16,24]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm4, %zmm1
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,8,16,24]
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm10, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm16
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm12
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm12, %zmm4
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm8
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm8, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm7, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm9 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm10
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm14, %zmm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,9,17,25]
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm7, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
+; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm9, %zmm11
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [3,11,19,27,3,11,19,27]
+; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm10, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [4,12,20,28,4,12,20,28]
+; AVX512F-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm5, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512F-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,22,30,6,14,22,30]
+; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,23,31,7,15,23,31]
 ; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm12, %zmm14
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm12, %zmm9
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm12, %zmm10
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm12, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm12, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm16, %zmm12, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm0, %zmm12
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm8, %zmm7
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm21, %zmm21
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512F-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512F-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512F-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512F-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512F-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [2,10,18,26]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm0, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm16
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm8, %zmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm19
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm26 {%k1}
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [3,11,19,27]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm4, %zmm7
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm7 = ymm7[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm26, %zmm7
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm18 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm8, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm18, %zmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,12,20,28]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm10
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm9, %zmm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm11 = [5,13,21,29]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm11, %zmm14
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [6,14,22,30]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [7,15,23,31]
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm18, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm8, %zmm9
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm8, %zmm11
+; AVX512F-NEXT:    vpermi2d %zmm6, %zmm8, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm6, %zmm18, %zmm8
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm1 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm1 = ymm10[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm25, %zmm2
+; AVX512F-NEXT:    vpblendd $240, (%rsp), %ymm9, %ymm5 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm5 = ymm9[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm22, %zmm5
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm9 = ymm14[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm23 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm23, %zmm9
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm10 = ymm11[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm27 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm27, %zmm10
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm24 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm24, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm31 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm31, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm28 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm11 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm11 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm28, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm15, %zmm1
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm8, 64(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm8, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm21, 64(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm8, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 64(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, (%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, 64(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512F-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, (%rax)
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512F-NEXT:    addq $1288, %rsp # imm = 0x508
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i32_stride8_vf32:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
+; AVX512BW-NEXT:    subq $1288, %rsp # imm = 0x508
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm17
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm28
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm20
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm8, %zmm2
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm7, %zmm0
+; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm17, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm14
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm1, %zmm14
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm9
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm21, %zmm9
+; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm17, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm19, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm17, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm26
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm18, %zmm26
+; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm17, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm25
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm22, %zmm25
+; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm17, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm30, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm29, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm1, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm23
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm27, %zmm23
+; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm17, %zmm30
+; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm17, %zmm29
+; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm1, %zmm17
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512BW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm24
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm31, %zmm24
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm1, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm15
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm15, %zmm7
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm15, %zmm21
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm15, %zmm19
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm15, %zmm18
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm15, %zmm22
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm15, %zmm27
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm15, %zmm31
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm1, %zmm15
 ; AVX512BW-NEXT:    movb $-64, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm11
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm3
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,8,16,24,0,8,16,24]
+; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm4, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,8,16,24]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm10, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm12
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm12, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm8
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm7, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm9 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm10
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm14, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,9,17,25]
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm7, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm9, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
+; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm9, %zmm11
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [3,11,19,27,3,11,19,27]
+; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm10, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [4,12,20,28,4,12,20,28]
+; AVX512BW-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm5, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [5,13,21,29,5,13,21,29]
 ; AVX512BW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,22,30,6,14,22,30]
+; AVX512BW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm12, %zmm14
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm12, %zmm9
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm12, %zmm10
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm12, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm12, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm16, %zmm12, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512BW-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm0, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm7
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm21, %zmm21
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512BW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512BW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512BW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512BW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [2,10,18,26]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm26 {%k1}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [3,11,19,27]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm4, %zmm7
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm7 = ymm7[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm26, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm18 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm18, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,12,20,28]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm10
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm9, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [5,13,21,29]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm11, %zmm14
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [6,14,22,30]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [7,15,23,31]
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm18, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm9
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm11
+; AVX512BW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm18, %zmm8
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm1 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm1 = ymm10[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm25, %zmm2
+; AVX512BW-NEXT:    vpblendd $240, (%rsp), %ymm9, %ymm5 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm5 = ymm9[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm22, %zmm5
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm9 = ymm14[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm23 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm23, %zmm9
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm10 = ymm11[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm27 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm27, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm24 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm24, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm31 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm31, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm28 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm11 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm11 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm28, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm15, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm8, 64(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm8, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, 64(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm8, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 64(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, 64(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512BW-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%rax)
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512BW-NEXT:    addq $1288, %rsp # imm = 0x508
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <256 x i32>, ptr %in.vec, align 64
@@ -8127,1227 +8154,1177 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-LABEL: load_i32_stride8_vf64:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $3304, %rsp # imm = 0xCE8
-; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm21
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm7
-; AVX512F-NEXT:    vmovaps 1152(%rdi), %zmm0
-; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm23
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm22
-; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm28
-; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm30
-; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm18
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm15
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm14
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm27
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm16
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512F-NEXT:    subq $3272, %rsp # imm = 0xCC8
+; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm15
+; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm24
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm22
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm3
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm10, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm10, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm30
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm10, %zmm30
+; AVX512F-NEXT:    vpermi2d %zmm15, %zmm1, %zmm10
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm15, %zmm1, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm15, %zmm1, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm2
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm6, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm8, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm8, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm22, %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm15, %zmm1, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm15, %zmm1, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm6, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm15, %zmm1, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm8, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm15, %zmm1, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm15, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm13, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm25
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm3, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm26
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm7, %zmm26
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm11, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm0, %zmm2
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm24, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm12, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm1, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm1, %zmm25
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm3, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm31
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm7, %zmm31
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm11, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm24, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm12, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm13, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm1, %zmm23
+; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm3, %zmm6
+; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm8
+; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm17
+; AVX512F-NEXT:    vpermi2d %zmm8, %zmm17, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm20
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm7, %zmm20
+; AVX512F-NEXT:    vpermi2d %zmm8, %zmm17, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm22
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm11, %zmm22
+; AVX512F-NEXT:    vpermi2d %zmm8, %zmm17, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm28
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm24, %zmm28
+; AVX512F-NEXT:    vpermi2d %zmm8, %zmm17, %zmm24
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm15
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm8, %zmm17, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm15
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm12, %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm8, %zmm17, %zmm12
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm12
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm13, %zmm12
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm8, %zmm17, %zmm13
+; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm1, %zmm17
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm1, %zmm27
 ; AVX512F-NEXT:    movb $-64, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm18
 ; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
 ; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm1, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm13, %zmm0, %zmm10
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm29
-; AVX512F-NEXT:    vmovdqu64 %zmm19, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm31
-; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm11, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm1, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm15
-; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm1, %zmm8
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm9
 ; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm30, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm19
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm23, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm1, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm3
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm18
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm22
-; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm10
-; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm10, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm3
-; AVX512F-NEXT:    vpermi2d %zmm2, %zmm3, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm26
-; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm9
-; AVX512F-NEXT:    vpermi2d %zmm26, %zmm9, %zmm1
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm29
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,8,16,24]
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm0, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm5, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm8
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm1, %zmm5
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm21
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm9
 ; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm21, %zmm0, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm4, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm6 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm16
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm1, %zmm4
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm19
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm0, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm3 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm15
+; AVX512F-NEXT:    vpermi2d %zmm9, %zmm15, %zmm1
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm30
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm13
+; AVX512F-NEXT:    vpermi2d %zmm30, %zmm13, %zmm0
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm30, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm24
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vpermt2d %zmm23, %zmm0, %zmm8
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [1,9,17,25,1,9,17,25]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm1, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm29, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm31, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vpermt2d %zmm11, %zmm0, %zmm14
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm1, %zmm13
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm29, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm31, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm16, %zmm0, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm10, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm25, %zmm3, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm26, %zmm9, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm30, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm18
-; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm14
-; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm23, %zmm0, %zmm4
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [2,10,18,26,2,10,18,26]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm1, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
+; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm24
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm11, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm23
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,9,17,25]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm10
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm2, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm20, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm26 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm10
+; AVX512F-NEXT:    vpermt2d %zmm21, %zmm2, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm26, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm25, %zmm1, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm5
-; AVX512F-NEXT:    vpermt2d %zmm29, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm8
-; AVX512F-NEXT:    vpermt2d %zmm31, %zmm1, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm16, %zmm0, %zmm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm12, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vpermi2d {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm0 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2d %zmm10, %zmm3, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm2, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm31, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm9, %zmm15, %zmm0
+; AVX512F-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm7, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm30, %zmm0, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm26, %zmm0, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512F-NEXT:    vpermt2d %zmm14, %zmm0, %zmm15
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [3,11,19,27,3,11,19,27]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512F-NEXT:    vpermt2d {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm6 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm18, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm29
-; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm16
-; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm28
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm23, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm15
-; AVX512F-NEXT:    vpermt2d %zmm25, %zmm1, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm11, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm27
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm22
-; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm31, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm12, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [2,10,18,26,2,10,18,26]
+; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,10,18,26]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm22, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2d %zmm12, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2d %zmm8, %zmm3, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm30, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm26, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm0, %zmm4
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,12,20,28,4,12,20,28]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm1, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm24, %zmm0, %zmm29
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm28, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm24, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm1, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm16
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm27, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm27, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vpermt2d %zmm31, %zmm1, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm20, %zmm0, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm22
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm21, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vpermi2d %zmm12, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2d %zmm8, %zmm2, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm30, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm12
-; AVX512F-NEXT:    vpermt2d %zmm26, %zmm7, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [5,13,21,29,5,13,21,29]
-; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm9, %zmm2, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm24, %zmm7, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm16, %zmm2, %zmm14
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm9
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm7, %zmm9
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm30
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm10, %zmm30
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm21
-; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm25
-; AVX512F-NEXT:    vpermt2d %zmm28, %zmm7, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm31
-; AVX512F-NEXT:    vpermt2d %zmm28, %zmm10, %zmm31
-; AVX512F-NEXT:    vpermt2d %zmm28, %zmm2, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm24, %zmm10, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm24, %zmm2, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm18
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm7, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm16
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm10, %zmm16
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm2, %zmm19
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm27, %zmm7, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm17
-; AVX512F-NEXT:    vpermt2d %zmm27, %zmm10, %zmm17
-; AVX512F-NEXT:    vpermt2d %zmm27, %zmm2, %zmm23
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm14
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm7, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm10, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm21, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm2, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm7, %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm28
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm10, %zmm28
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm22
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm7, %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm24
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm10, %zmm24
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm10, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm5
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm10, %zmm5
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm26, %zmm10, %zmm8
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm10
-; AVX512F-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm9, %zmm15, %zmm0
+; AVX512F-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm2, %zmm27
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm2, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm26
-; AVX512F-NEXT:    vpermt2d %zmm6, %zmm2, %zmm12
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [3,11,19,27,3,11,19,27]
+; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [3,11,19,27]
 ; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm25 # 32-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm11 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm28, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm21, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm9, %zmm15, %zmm0
+; AVX512F-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [4,12,20,28,4,12,20,28]
 ; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm0, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm11, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm22 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,12,20,28]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm31
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2d %zmm14, %zmm1, %zmm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm22, %zmm21
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [6,14,22,30,6,14,22,30]
-; AVX512F-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm7
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm3, %zmm7
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [7,15,23,31,7,15,23,31]
-; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm11, %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm4, %zmm11, %zmm9
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm21, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm22
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm3, %zmm2
-; AVX512F-NEXT:    vpermi2d %zmm14, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm14, %zmm11, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm14
-; AVX512F-NEXT:    vpermt2d %zmm22, %zmm11, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm5 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm6
-; AVX512F-NEXT:    vpblendd $15, (%rsp), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm7 = mem[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm31 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm31, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm17 {%k1}
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm24 {%k1}
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm24, %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm20 {%k1}
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm3 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm3 = mem[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm20, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm23 {%k1}
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm4 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm23, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k1}
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm7 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm7 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm29, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm26 {%k1}
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm8 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm8 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm26, %zmm8
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 192(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 128(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 64(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, (%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 192(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, (%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 64(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 128(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 192(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, (%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 64(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 128(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 192(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, (%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 64(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 128(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 192(%r9)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, (%r9)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 64(%r9)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 128(%r9)
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm21, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm25, 64(%rax)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm2, 128(%rax)
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 128(%rax)
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm8, 128(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, 64(%rax)
-; AVX512F-NEXT:    addq $3304, %rsp # imm = 0xCE8
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm26
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm9, %zmm15, %zmm0
+; AVX512F-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm28
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,21,29,5,13,21,29]
+; AVX512F-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm7, %zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,13,21,29]
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm2, %zmm12
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm24
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm7, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm21, %zmm2, %zmm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,22,30,6,14,22,30]
+; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm11
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm1, %zmm11
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [7,15,23,31,7,15,23,31]
+; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2d %zmm8, %zmm10, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm7, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm1, %zmm3
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm10, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm9, %zmm15, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm1, %zmm8
+; AVX512F-NEXT:    vpermi2d %zmm9, %zmm15, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm9, %zmm10, %zmm15
+; AVX512F-NEXT:    vpermt2d %zmm16, %zmm10, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm9, %zmm20
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm9
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm2, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm10, %zmm16
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512F-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,14,22,30]
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm9
+; AVX512F-NEXT:    vpermt2d %zmm21, %zmm7, %zmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = [7,15,23,31]
+; AVX512F-NEXT:    vpermt2d %zmm21, %zmm10, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm14
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm7, %zmm14
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm10, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm7, %zmm12
+; AVX512F-NEXT:    vpermi2d %zmm30, %zmm13, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm30, %zmm10, %zmm13
+; AVX512F-NEXT:    vpermt2d %zmm19, %zmm10, %zmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm12 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm11, %zmm9
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm11, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm25 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm7 = ymm5[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm25, %zmm7
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm23 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm10 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm10 = ymm4[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm23, %zmm10
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm17 {%k1}
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm17, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm27 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm27, %zmm6
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 192(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 128(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 64(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm11, (%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 192(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, (%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 64(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 128(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 192(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, (%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 64(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 128(%rcx)
+; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, 192(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, (%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 64(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm4, 128(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm28, 192(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm26, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, 64(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm31, 128(%r9)
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm24, 128(%rax)
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, 128(%rax)
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512F-NEXT:    addq $3272, %rsp # imm = 0xCC8
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i32_stride8_vf64:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $3304, %rsp # imm = 0xCE8
-; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovaps 1152(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm23
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm28
-; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm18
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm15
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm27
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm16
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512BW-NEXT:    subq $3272, %rsp # imm = 0xCC8
+; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm15
+; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm24
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm22
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm3
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm10, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm10, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm30
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm10, %zmm30
+; AVX512BW-NEXT:    vpermi2d %zmm15, %zmm1, %zmm10
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm5, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm5, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm5, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm15, %zmm1, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm15, %zmm1, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm6, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm8, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm15, %zmm1, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm15, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm6, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm15, %zmm1, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm8, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm15, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm25
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm3, %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm26
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm7, %zmm26
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm11, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm0, %zmm2
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm24, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm12, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm1, %zmm25
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm3, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm31
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm7, %zmm31
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm11, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm24, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm12, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm13, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm1, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm3, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm17
+; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm17, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm20
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm7, %zmm20
+; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm17, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm22
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm11, %zmm22
+; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm17, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm28
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm24, %zmm28
+; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm17, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm15
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm15
+; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm17, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm15
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm12, %zmm15
+; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm17, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm12
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm13, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm17, %zmm13
+; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm1, %zmm17
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm1, %zmm27
 ; AVX512BW-NEXT:    movb $-64, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm18
 ; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
 ; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm1, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm13, %zmm0, %zmm10
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 %zmm19, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm31
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm11, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm1, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm15
-; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm30, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm19
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm3
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm18
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm10, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm3
-; AVX512BW-NEXT:    vpermi2d %zmm2, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm9
-; AVX512BW-NEXT:    vpermi2d %zmm26, %zmm9, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm29
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,8,16,24]
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm0, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm21
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm4, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm6 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm1, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm19
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm0, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm3 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm15
+; AVX512BW-NEXT:    vpermi2d %zmm9, %zmm15, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm30
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm13
+; AVX512BW-NEXT:    vpermi2d %zmm30, %zmm13, %zmm0
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm30, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm24
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm0, %zmm8
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [1,9,17,25,1,9,17,25]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm1, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm31, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vpermt2d %zmm11, %zmm0, %zmm14
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm1, %zmm13
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm31, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm10, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm25, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm26, %zmm9, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm30, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm18
-; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm14
-; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm0, %zmm4
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [2,10,18,26,2,10,18,26]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
+; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm24
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm11, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,9,17,25]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm10
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm2, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm20, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm26 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm10
+; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm2, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm26, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm5
-; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm8
-; AVX512BW-NEXT:    vpermt2d %zmm31, %zmm1, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm0, %zmm7
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vpermi2d {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm0 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2d %zmm10, %zmm3, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm2, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm31, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm9, %zmm15, %zmm0
+; AVX512BW-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm7, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm30, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm26, %zmm0, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm0, %zmm15
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [3,11,19,27,3,11,19,27]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512BW-NEXT:    vpermt2d {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm6 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm18, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm16
-; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm28
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm15
-; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm11, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm22
-; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm31, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm12, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [2,10,18,26,2,10,18,26]
+; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [2,10,18,26]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm22, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm3, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm30, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm26, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm0, %zmm4
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,12,20,28,4,12,20,28]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm0, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm28, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm1, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm16
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vpermt2d %zmm31, %zmm1, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm22
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vpermi2d %zmm12, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2d %zmm8, %zmm2, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm30, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm12
-; AVX512BW-NEXT:    vpermt2d %zmm26, %zmm7, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [5,13,21,29,5,13,21,29]
-; AVX512BW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm7, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm2, %zmm14
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm7, %zmm9
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm30
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm10, %zmm30
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm25
-; AVX512BW-NEXT:    vpermt2d %zmm28, %zmm7, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm31
-; AVX512BW-NEXT:    vpermt2d %zmm28, %zmm10, %zmm31
-; AVX512BW-NEXT:    vpermt2d %zmm28, %zmm2, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm10, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm2, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm18
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm7, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm16
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm10, %zmm16
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm2, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm7, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm17
-; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm10, %zmm17
-; AVX512BW-NEXT:    vpermt2d %zmm27, %zmm2, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm14
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm7, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm10, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm2, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm7, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm28
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm10, %zmm28
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm22
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm9, %zmm15, %zmm0
+; AVX512BW-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm7, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm24
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm10, %zmm24
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [3,11,19,27,3,11,19,27]
+; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [3,11,19,27]
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm28, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm9, %zmm15, %zmm0
+; AVX512BW-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [4,12,20,28,4,12,20,28]
+; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,12,20,28]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm31
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm22
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm26
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm15
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm10, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm9, %zmm15, %zmm0
+; AVX512BW-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm28
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,21,29,5,13,21,29]
+; AVX512BW-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm7, %zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,13,21,29]
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm2, %zmm12
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm7, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,22,30,6,14,22,30]
+; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm11
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm1, %zmm11
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [7,15,23,31,7,15,23,31]
+; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2d %zmm8, %zmm10, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm7, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm10, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm9, %zmm15, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm1, %zmm8
+; AVX512BW-NEXT:    vpermi2d %zmm9, %zmm15, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm9, %zmm10, %zmm15
+; AVX512BW-NEXT:    vpermt2d %zmm16, %zmm10, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm10, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm26, %zmm10, %zmm8
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm10
-; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm2, %zmm27
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm2, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm26
-; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm2, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm25 # 32-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm11 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [5,13,21,29,5,13,21,29]
-; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm11, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm22 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2d %zmm14, %zmm1, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm22, %zmm21
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [6,14,22,30,6,14,22,30]
-; AVX512BW-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm9, %zmm20
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm9
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm2, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm10, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512BW-NEXT:    vpermi2d %zmm30, %zmm13, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,14,22,30]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm9
+; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm7, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [7,15,23,31]
+; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm10, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm14
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm7, %zmm14
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm10, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm7, %zmm12
+; AVX512BW-NEXT:    vpermi2d %zmm30, %zmm13, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm30, %zmm10, %zmm13
+; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm10, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm3, %zmm7
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [7,15,23,31,7,15,23,31]
-; AVX512BW-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm11, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm4, %zmm11, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm12 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm11, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm11, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm25 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm7 = ymm5[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm25, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm23 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm10 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm10 = ymm4[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm23, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm17 {%k1}
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm15[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm17, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermi2d %zmm14, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2d %zmm14, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm14
-; AVX512BW-NEXT:    vpermt2d %zmm22, %zmm11, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm5 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm6
-; AVX512BW-NEXT:    vpblendd $15, (%rsp), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm7 = mem[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm31 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm31, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm17 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm24 {%k1}
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm24, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm20 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm3 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm3 = mem[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm20, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm23 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm4 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm23, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm7 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm7 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm29, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm26 {%k1}
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm8 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm8 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm26, %zmm8
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 192(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 128(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 64(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, (%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 192(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, (%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 64(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 128(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 192(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, (%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 64(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 128(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 192(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, (%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 64(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 128(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 192(%r9)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, (%r9)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 64(%r9)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 128(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm27 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm27, %zmm6
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 192(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 128(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 64(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm11, (%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 192(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, (%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 64(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 128(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 192(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, (%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 64(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 128(%rcx)
+; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, 192(%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, (%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 64(%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm4, 128(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, 192(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, 64(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, 128(%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, 64(%rax)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm2, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, 128(%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 128(%rax)
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, 64(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm8, 128(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%rax)
-; AVX512BW-NEXT:    addq $3304, %rsp # imm = 0xCE8
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512BW-NEXT:    addq $3272, %rsp # imm = 0xCC8
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <512 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
index faed36dfe51be..f04a99445f107 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
@@ -392,27 +392,28 @@ define void @load_i64_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm3
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,4,8,12,0,4,8,12]
 ; AVX512-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm4, %zmm5
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,4,8,12]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm4[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,5,9,13,1,5,9,13]
 ; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm5, %zmm6
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,5,9,13]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,6,10,14,2,6,10,14]
 ; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm6, %zmm7
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm7[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm7 = [2,6,10,14]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm7[0,1,2,3],zmm6[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [3,7,11,15,3,7,11,15]
 ; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm7, %zmm2
-; AVX512-NEXT:    vpermt2q %zmm1, %zmm7, %zmm0
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [3,7,11,15]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm7[4,5,6,7]
 ; AVX512-NEXT:    vmovdqa64 %zmm4, (%rsi)
 ; AVX512-NEXT:    vmovdqa64 %zmm5, (%rdx)
 ; AVX512-NEXT:    vmovdqa64 %zmm6, (%rcx)
@@ -849,63 +850,64 @@ define void @load_i64_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512-LABEL: load_i64_stride4_vf16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm3
-; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm6
-; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm7
+; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm3
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm5
+; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm6
+; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm7
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,4,8,12,0,4,8,12]
 ; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512-NEXT:    vpermt2q %zmm6, %zmm8, %zmm9
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm8, %zmm10
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm9[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm8, %zmm10
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm8
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm10[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm9
+; AVX512-NEXT:    vpermt2q %zmm7, %zmm8, %zmm9
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,4,8,12]
+; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm10, %zmm11
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm11[0,1,2,3],zmm9[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm8
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm10
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm10[0,1,2,3],zmm8[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,5,9,13,1,5,9,13]
 ; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm11
-; AVX512-NEXT:    vpermt2q %zmm6, %zmm10, %zmm11
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm12
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm10, %zmm12
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm12[0,1,2,3],zmm11[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm10, %zmm12
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm10
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm12[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm11
+; AVX512-NEXT:    vpermt2q %zmm7, %zmm10, %zmm11
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm12 = [1,5,9,13]
+; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm13
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm12, %zmm13
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm13[0,1,2,3],zmm11[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm10
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm12
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm12[0,1,2,3],zmm10[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [2,6,10,14,2,6,10,14]
 ; AVX512-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512-NEXT:    vpermt2q %zmm6, %zmm12, %zmm13
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm14
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm12, %zmm14
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm14[0,1,2,3],zmm13[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm12, %zmm14
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm12[0,1,2,3],zmm14[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm13
+; AVX512-NEXT:    vpermt2q %zmm7, %zmm12, %zmm13
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm14 = [2,6,10,14]
+; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm14, %zmm15
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm15[0,1,2,3],zmm13[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm12
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm14
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm14[0,1,2,3],zmm12[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [3,7,11,15,3,7,11,15]
 ; AVX512-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermt2q %zmm6, %zmm14, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm7[4,5,6,7]
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm9, 64(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm8, (%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm11, 64(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm10, (%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm13, 64(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm12, (%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm4, 64(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm0, (%r8)
+; AVX512-NEXT:    vpermt2q %zmm7, %zmm14, %zmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm7 = [3,7,11,15]
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm7, %zmm4
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm6[4,5,6,7]
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm7, %zmm1
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm3[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm8, 64(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm9, (%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm10, 64(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm11, (%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm12, 64(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm13, (%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm0, 64(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm4, (%r8)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %wide.vec = load <64 x i64>, ptr %in.vec, align 64
@@ -1829,115 +1831,116 @@ define void @load_i64_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512-LABEL: load_i64_stride4_vf32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm3
-; AVX512-NEXT:    vmovdqa64 832(%rdi), %zmm4
-; AVX512-NEXT:    vmovdqa64 768(%rdi), %zmm5
-; AVX512-NEXT:    vmovdqa64 960(%rdi), %zmm10
-; AVX512-NEXT:    vmovdqa64 896(%rdi), %zmm7
-; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm14
-; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm15
+; AVX512-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 768(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa64 960(%rdi), %zmm3
+; AVX512-NEXT:    vmovdqa64 896(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm6
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm8
+; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm9
+; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm12
+; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm13
+; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm14
 ; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm17
 ; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm16
-; AVX512-NEXT:    vmovdqa64 576(%rdi), %zmm8
-; AVX512-NEXT:    vmovdqa64 512(%rdi), %zmm9
-; AVX512-NEXT:    vmovdqa64 704(%rdi), %zmm12
-; AVX512-NEXT:    vmovdqa64 640(%rdi), %zmm11
+; AVX512-NEXT:    vmovdqa64 576(%rdi), %zmm4
+; AVX512-NEXT:    vmovdqa64 512(%rdi), %zmm5
+; AVX512-NEXT:    vmovdqa64 704(%rdi), %zmm10
+; AVX512-NEXT:    vmovdqa64 640(%rdi), %zmm7
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,4,8,12,0,4,8,12]
 ; AVX512-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm6
-; AVX512-NEXT:    vpermt2q %zmm12, %zmm19, %zmm6
-; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512-NEXT:    vpermt2q %zmm8, %zmm19, %zmm13
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm13[0,1,2,3],zmm6[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm13
-; AVX512-NEXT:    vpermt2q %zmm17, %zmm19, %zmm13
-; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm18
-; AVX512-NEXT:    vpermt2q %zmm14, %zmm19, %zmm18
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm18[0,1,2,3],zmm13[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512-NEXT:    vpermt2q %zmm10, %zmm19, %zmm18
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm20
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm19, %zmm20
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm20[0,1,2,3],zmm18[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm20
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm19, %zmm20
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm19
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm19[0,1,2,3],zmm20[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm11
+; AVX512-NEXT:    vpermt2q %zmm10, %zmm19, %zmm11
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [0,4,8,12]
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm4, %zmm20, %zmm15
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm15[0,1,2,3],zmm11[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm17, %zmm19, %zmm15
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm18
+; AVX512-NEXT:    vpermt2q %zmm13, %zmm20, %zmm18
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm15 = zmm18[0,1,2,3],zmm15[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm18
+; AVX512-NEXT:    vpermt2q %zmm12, %zmm19, %zmm18
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm21
+; AVX512-NEXT:    vpermt2q %zmm8, %zmm20, %zmm21
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm21[0,1,2,3],zmm18[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm19
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm20
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm20[0,1,2,3],zmm19[4,5,6,7]
 ; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [1,5,9,13,1,5,9,13]
 ; AVX512-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm20
-; AVX512-NEXT:    vpermt2q %zmm12, %zmm21, %zmm20
-; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm22
-; AVX512-NEXT:    vpermt2q %zmm8, %zmm21, %zmm22
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm22[0,1,2,3],zmm20[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm22
-; AVX512-NEXT:    vpermt2q %zmm17, %zmm21, %zmm22
-; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm23
-; AVX512-NEXT:    vpermt2q %zmm14, %zmm21, %zmm23
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],zmm22[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512-NEXT:    vpermt2q %zmm10, %zmm21, %zmm23
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm24
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm21, %zmm24
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm20
+; AVX512-NEXT:    vpermt2q %zmm10, %zmm21, %zmm20
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [1,5,9,13]
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm23
+; AVX512-NEXT:    vpermt2q %zmm4, %zmm22, %zmm23
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm23[0,1,2,3],zmm20[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm23
+; AVX512-NEXT:    vpermt2q %zmm17, %zmm21, %zmm23
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm24
+; AVX512-NEXT:    vpermt2q %zmm13, %zmm22, %zmm24
 ; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm24[0,1,2,3],zmm23[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm24
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm21, %zmm24
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm21
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],zmm24[4,5,6,7]
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [2,6,10,14,2,6,10,14]
-; AVX512-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm25
-; AVX512-NEXT:    vpermt2q %zmm12, %zmm24, %zmm25
-; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm26
-; AVX512-NEXT:    vpermt2q %zmm8, %zmm24, %zmm26
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm26[0,1,2,3],zmm25[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm26
-; AVX512-NEXT:    vpermt2q %zmm17, %zmm24, %zmm26
-; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm27
-; AVX512-NEXT:    vpermt2q %zmm14, %zmm24, %zmm27
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm26 = zmm27[0,1,2,3],zmm26[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm27
-; AVX512-NEXT:    vpermt2q %zmm10, %zmm24, %zmm27
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm28
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm24, %zmm28
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm24
+; AVX512-NEXT:    vpermt2q %zmm12, %zmm21, %zmm24
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm25
+; AVX512-NEXT:    vpermt2q %zmm8, %zmm22, %zmm25
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm25[0,1,2,3],zmm24[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm21
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm22
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm22[0,1,2,3],zmm21[4,5,6,7]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [2,6,10,14,2,6,10,14]
+; AVX512-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm25
+; AVX512-NEXT:    vpermt2q %zmm10, %zmm22, %zmm25
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [2,6,10,14]
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm27
+; AVX512-NEXT:    vpermt2q %zmm4, %zmm26, %zmm27
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm27[0,1,2,3],zmm25[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm27
+; AVX512-NEXT:    vpermt2q %zmm17, %zmm22, %zmm27
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm28
+; AVX512-NEXT:    vpermt2q %zmm13, %zmm26, %zmm28
 ; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm28[0,1,2,3],zmm27[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm28
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm24, %zmm28
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm24
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm24[0,1,2,3],zmm28[4,5,6,7]
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [3,7,11,15,3,7,11,15]
-; AVX512-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermt2q %zmm17, %zmm28, %zmm16
-; AVX512-NEXT:    vpermt2q %zmm14, %zmm28, %zmm15
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm15[0,1,2,3],zmm16[4,5,6,7]
-; AVX512-NEXT:    vpermt2q %zmm10, %zmm28, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm4, %zmm28, %zmm5
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm7[4,5,6,7]
-; AVX512-NEXT:    vpermt2q %zmm12, %zmm28, %zmm11
-; AVX512-NEXT:    vpermt2q %zmm8, %zmm28, %zmm9
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm9[0,1,2,3],zmm11[4,5,6,7]
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm28, %zmm2
-; AVX512-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm18, 192(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm19, (%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm13, 64(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm6, 128(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm23, 192(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm21, (%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm22, 64(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm28
+; AVX512-NEXT:    vpermt2q %zmm12, %zmm22, %zmm28
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512-NEXT:    vpermt2q %zmm8, %zmm26, %zmm29
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm29[0,1,2,3],zmm28[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm22
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm26
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm26[0,1,2,3],zmm22[4,5,6,7]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [3,7,11,15,3,7,11,15]
+; AVX512-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermt2q %zmm17, %zmm26, %zmm16
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [3,7,11,15]
+; AVX512-NEXT:    vpermt2q %zmm13, %zmm17, %zmm14
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm14[0,1,2,3],zmm16[4,5,6,7]
+; AVX512-NEXT:    vpermt2q %zmm12, %zmm26, %zmm9
+; AVX512-NEXT:    vpermt2q %zmm8, %zmm17, %zmm6
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm9[4,5,6,7]
+; AVX512-NEXT:    vpermt2q %zmm3, %zmm26, %zmm2
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm17, %zmm1
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm2[4,5,6,7]
+; AVX512-NEXT:    vpermt2q %zmm10, %zmm26, %zmm7
+; AVX512-NEXT:    vpermt2q %zmm4, %zmm17, %zmm5
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm7[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm19, 192(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm18, (%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm15, 64(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm11, 128(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm21, 192(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm24, (%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm23, 64(%rdx)
 ; AVX512-NEXT:    vmovdqa64 %zmm20, 128(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm27, 192(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm26, 64(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm28, (%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm27, 64(%rcx)
 ; AVX512-NEXT:    vmovdqa64 %zmm25, 128(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm5, 128(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm4, 192(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm0, (%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm14, 64(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm1, 128(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm0, 192(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm6, (%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm13, 64(%r8)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %wide.vec = load <128 x i64>, ptr %in.vec, align 64
@@ -3833,297 +3836,297 @@ define void @load_i64_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512-LABEL: load_i64_stride4_vf64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    subq $2056, %rsp # imm = 0x808
-; AVX512-NEXT:    vmovdqa64 1472(%rdi), %zmm24
-; AVX512-NEXT:    vmovdqa64 1408(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 832(%rdi), %zmm26
-; AVX512-NEXT:    vmovdqa64 768(%rdi), %zmm1
-; AVX512-NEXT:    vmovdqa64 960(%rdi), %zmm28
-; AVX512-NEXT:    vmovdqa64 896(%rdi), %zmm2
-; AVX512-NEXT:    vmovdqa64 576(%rdi), %zmm25
-; AVX512-NEXT:    vmovdqa64 512(%rdi), %zmm3
-; AVX512-NEXT:    vmovdqa64 704(%rdi), %zmm27
-; AVX512-NEXT:    vmovdqa64 640(%rdi), %zmm4
-; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm30
-; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm29
-; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm6
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,4,8,12,0,4,8,12]
-; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm29, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm30, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, (%rsp) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm27, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm25, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm28, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm26, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm24, %zmm9, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [1,5,9,13,1,5,9,13]
-; AVX512-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm29, %zmm15, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm30, %zmm15, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm28, %zmm15, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm26, %zmm15, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm27, %zmm15, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm25, %zmm15, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [2,6,10,14,2,6,10,14]
-; AVX512-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm29, %zmm22, %zmm7
-; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [3,7,11,15,3,7,11,15]
-; AVX512-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermt2q %zmm29, %zmm31, %zmm6
-; AVX512-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512-NEXT:    vpermt2q %zmm30, %zmm22, %zmm6
-; AVX512-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm30, %zmm31, %zmm5
-; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512-NEXT:    vpermt2q %zmm28, %zmm22, %zmm5
-; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm28, %zmm31, %zmm2
-; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512-NEXT:    vpermt2q %zmm26, %zmm22, %zmm2
+; AVX512-NEXT:    subq $2120, %rsp # imm = 0x848
+; AVX512-NEXT:    vmovdqa64 1984(%rdi), %zmm18
+; AVX512-NEXT:    vmovdqa64 1728(%rdi), %zmm17
+; AVX512-NEXT:    vmovdqa64 1664(%rdi), %zmm7
+; AVX512-NEXT:    vmovdqa64 1472(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa64 1408(%rdi), %zmm8
+; AVX512-NEXT:    vmovdqa64 1216(%rdi), %zmm6
+; AVX512-NEXT:    vmovdqa64 1152(%rdi), %zmm9
+; AVX512-NEXT:    vmovdqa64 960(%rdi), %zmm3
+; AVX512-NEXT:    vmovdqa64 896(%rdi), %zmm10
+; AVX512-NEXT:    vmovdqa64 704(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqa64 640(%rdi), %zmm11
+; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm4
+; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm12
+; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm5
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [0,4,8,12,0,4,8,12]
+; AVX512-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm13, %zmm14
+; AVX512-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512-NEXT:    vpermt2q %zmm4, %zmm13, %zmm14
+; AVX512-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm14
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm13, %zmm14
+; AVX512-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm14
+; AVX512-NEXT:    vpermt2q %zmm3, %zmm13, %zmm14
+; AVX512-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm14
+; AVX512-NEXT:    vpermt2q %zmm6, %zmm13, %zmm14
+; AVX512-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm14
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm13, %zmm14
+; AVX512-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [1,5,9,13,1,5,9,13]
+; AVX512-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm4, %zmm14, %zmm15
+; AVX512-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm14, %zmm15
+; AVX512-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm3, %zmm14, %zmm15
+; AVX512-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm14, %zmm15
+; AVX512-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm14, %zmm15
+; AVX512-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm6, %zmm14, %zmm15
+; AVX512-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [2,6,10,14,2,6,10,14]
+; AVX512-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm4, %zmm27, %zmm16
+; AVX512-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,7,11,15,3,7,11,15]
+; AVX512-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermt2q %zmm4, %zmm0, %zmm15
+; AVX512-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm27, %zmm4
+; AVX512-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm0, %zmm12
+; AVX512-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512-NEXT:    vpermt2q %zmm3, %zmm27, %zmm4
+; AVX512-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vpermt2q %zmm3, %zmm0, %zmm10
+; AVX512-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm3
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm27, %zmm3
+; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm0, %zmm11
+; AVX512-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm27, %zmm2
 ; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm26, %zmm31, %zmm1
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
+; AVX512-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512-NEXT:    vpermt2q %zmm6, %zmm27, %zmm1
 ; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm27, %zmm22, %zmm1
+; AVX512-NEXT:    vpermt2q %zmm6, %zmm0, %zmm9
+; AVX512-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512-NEXT:    vpermt2q %zmm17, %zmm13, %zmm1
 ; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm27, %zmm31, %zmm4
-; AVX512-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm25, %zmm22, %zmm1
+; AVX512-NEXT:    vmovdqa64 1920(%rdi), %zmm30
+; AVX512-NEXT:    vpermi2q %zmm18, %zmm30, %zmm13
+; AVX512-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512-NEXT:    vpermt2q %zmm17, %zmm14, %zmm1
 ; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm25, %zmm31, %zmm3
-; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm24, %zmm15, %zmm1
+; AVX512-NEXT:    vpermi2q %zmm18, %zmm30, %zmm14
+; AVX512-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512-NEXT:    vpermt2q %zmm17, %zmm27, %zmm1
 ; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm24, %zmm22, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm24, %zmm31, %zmm0
+; AVX512-NEXT:    vpermi2q %zmm18, %zmm30, %zmm27
+; AVX512-NEXT:    vpermt2q %zmm18, %zmm0, %zmm30
+; AVX512-NEXT:    vpermt2q %zmm17, %zmm0, %zmm7
+; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm20
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,4,8,12]
+; AVX512-NEXT:    vmovdqa64 %zmm20, %zmm0
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm3, %zmm0
+; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,5,9,13]
+; AVX512-NEXT:    vmovdqa64 %zmm20, %zmm0
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
 ; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [2,6,10,14]
+; AVX512-NEXT:    vmovdqa64 %zmm20, %zmm0
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm18, %zmm0
+; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [3,7,11,15]
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm24, %zmm20
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm25
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqa64 %zmm25, %zmm29
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm3, %zmm29
+; AVX512-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm18, %zmm0
+; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm24, %zmm25
+; AVX512-NEXT:    vmovdqa64 832(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqa64 768(%rdi), %zmm12
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm23
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm3, %zmm23
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm28
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm4, %zmm28
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm31
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm18, %zmm31
+; AVX512-NEXT:    vpermt2q %zmm2, %zmm24, %zmm12
+; AVX512-NEXT:    vmovdqa64 576(%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm19
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm3, %zmm19
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm22
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm4, %zmm22
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm26
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm18, %zmm26
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm24, %zmm11
 ; AVX512-NEXT:    vmovdqa64 1344(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 1280(%rdi), %zmm24
-; AVX512-NEXT:    vmovdqa64 %zmm24, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm9, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm24, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm15, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm24, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm22, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm24
-; AVX512-NEXT:    vmovdqa64 1216(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 1152(%rdi), %zmm28
-; AVX512-NEXT:    vmovdqa64 %zmm28, %zmm30
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm9, %zmm30
-; AVX512-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm15, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm22, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm28
+; AVX512-NEXT:    vmovdqa64 1280(%rdi), %zmm14
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm15
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm3, %zmm15
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm17
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm4, %zmm17
+; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm21
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm18, %zmm21
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm24, %zmm14
 ; AVX512-NEXT:    vmovdqa64 1088(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 1024(%rdi), %zmm23
-; AVX512-NEXT:    vmovdqa64 %zmm23, %zmm26
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm9, %zmm26
-; AVX512-NEXT:    vmovdqa64 %zmm23, %zmm29
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm15, %zmm29
-; AVX512-NEXT:    vmovdqa64 %zmm23, %zmm1
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm22, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm23
-; AVX512-NEXT:    vmovdqa64 1984(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 1920(%rdi), %zmm13
-; AVX512-NEXT:    vmovdqa64 %zmm13, %zmm20
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm9, %zmm20
-; AVX512-NEXT:    vmovdqa64 %zmm13, %zmm25
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm15, %zmm25
-; AVX512-NEXT:    vmovdqa64 %zmm13, %zmm27
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm22, %zmm27
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm13
+; AVX512-NEXT:    vmovdqa64 1024(%rdi), %zmm9
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm10
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm3, %zmm10
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm13
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm4, %zmm13
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm16
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm18, %zmm16
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm24, %zmm9
+; AVX512-NEXT:    vmovdqa64 1600(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa64 1536(%rdi), %zmm5
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm3, %zmm7
 ; AVX512-NEXT:    vmovdqa64 1856(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 1792(%rdi), %zmm11
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm18
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm9, %zmm18
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm19
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm15, %zmm19
-; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm21
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm22, %zmm21
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm11
-; AVX512-NEXT:    vmovdqa64 1728(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 1664(%rdi), %zmm8
-; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm14
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm9, %zmm14
-; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm16
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm15, %zmm16
-; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm22, %zmm17
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm8
-; AVX512-NEXT:    vmovdqa64 1600(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 1536(%rdi), %zmm3
-; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm9, %zmm6
-; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm15, %zmm7
-; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm12
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm22, %zmm12
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm3
-; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm0
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm9, %zmm4
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm15, %zmm5
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm22, %zmm10
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm2
-; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm1
-; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm0
-; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
-; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm15
-; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm22
-; AVX512-NEXT:    vpermt2q %zmm0, %zmm31, %zmm1
-; AVX512-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm0 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm0 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm31 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm31 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm26 = zmm26[0,1,2,3],zmm30[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm30 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm30 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm14[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm18[0,1,2,3],zmm20[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm4[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm7[0,1,2,3],zmm16[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 1792(%rdi), %zmm2
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm4, %zmm6
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm2, %zmm4
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm18, %zmm8
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm2, %zmm18
+; AVX512-NEXT:    vpermt2q %zmm0, %zmm24, %zmm2
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm24, %zmm5
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm0 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm0 = zmm29[0,1,2,3],mem[4,5,6,7]
 ; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm0 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm1 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm19 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm19 = zmm19[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm23 = zmm23[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm10 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm10 = zmm10[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm15 = zmm15[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm0 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm0 = zmm6[0,1,2,3],mem[4,5,6,7]
 ; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm16 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm18 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm18 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm29 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm29 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm19[0,1,2,3],zmm25[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm15[0,1,2,3],zmm5[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm12[0,1,2,3],zmm17[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm15 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm15 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm24 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm24 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm29 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm29 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm28 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm28 = zmm28[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm22 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm22 = zmm22[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm17 = zmm17[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm13 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm13 = zmm13[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm4 = zmm4[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
 ; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm4 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm6 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
 ; AVX512-NEXT:    # zmm0 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],zmm27[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm22[0,1,2,3],zmm10[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm22 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm22 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm27 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm27 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm24, %zmm24 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm24 = zmm24[0,1,2,3],mem[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],zmm28[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm11[0,1,2,3],zmm13[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm8[4,5,6,7]
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm2[4,5,6,7]
-; AVX512-NEXT:    vmovdqa64 %zmm14, 448(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm6, 384(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm30, 320(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm26, 256(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm31, 192(%rsi)
-; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-NEXT:    vmovaps %zmm2, 128(%rsi)
-; AVX512-NEXT:    vmovups (%rsp), %zmm2 # 64-byte Reload
-; AVX512-NEXT:    vmovaps %zmm2, 64(%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm9, (%rsi)
-; AVX512-NEXT:    vmovdqa64 %zmm19, 448(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm29, 256(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm20, 320(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm18, 128(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm16, 192(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-NEXT:    vmovaps %zmm2, 64(%rdx)
-; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512-NEXT:    vmovaps %zmm2, 384(%rdx)
-; AVX512-NEXT:    vmovdqa64 %zmm21, 448(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm0, 256(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm4, 320(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm25, 128(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm17, 192(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm10, (%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm15, 64(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm12, 384(%rcx)
-; AVX512-NEXT:    vmovdqa64 %zmm3, 384(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm11, 448(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm23, 256(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm24, 320(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm7, 128(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm27, 192(%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm1, (%r8)
-; AVX512-NEXT:    vmovdqa64 %zmm22, 64(%r8)
-; AVX512-NEXT:    addq $2056, %rsp # imm = 0x808
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm31 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm31 = zmm31[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm26 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm26 = zmm26[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21, %zmm21 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm16 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm16 = zmm16[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm18[0,1,2,3],zmm27[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm20 = zmm20[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm25 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm25 = zmm25[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm12 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm12 = zmm12[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm11 = zmm11[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm14 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm14 = zmm14[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm9 = zmm9[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm30[4,5,6,7]
+; AVX512-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm5 = zmm5[0,1,2,3],mem[4,5,6,7]
+; AVX512-NEXT:    vmovdqa64 %zmm3, 448(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm7, 384(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm15, 320(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm10, 256(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm23, 192(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm19, 128(%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm1, 64(%rsi)
+; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT:    vmovaps %zmm1, (%rsi)
+; AVX512-NEXT:    vmovdqa64 %zmm4, 448(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm13, 256(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm17, 320(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm22, 128(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm28, 192(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm29, (%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm24, 64(%rdx)
+; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT:    vmovaps %zmm1, 384(%rdx)
+; AVX512-NEXT:    vmovdqa64 %zmm18, 448(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm16, 256(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm21, 320(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm26, 128(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm31, 192(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm0, (%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm6, 64(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm8, 384(%rcx)
+; AVX512-NEXT:    vmovdqa64 %zmm5, 384(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm2, 448(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm9, 256(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm14, 320(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm11, 128(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm12, 192(%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm25, (%r8)
+; AVX512-NEXT:    vmovdqa64 %zmm20, 64(%r8)
+; AVX512-NEXT:    addq $2120, %rsp # imm = 0x848
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %wide.vec = load <256 x i64>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
index 4a2c38168bdf4..ccdb717d5db79 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
@@ -5491,27 +5491,27 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm4
 ; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm19
 ; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm5
 ; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm26
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm6
 ; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm3
 ; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm2
 ; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm7
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [12,1,6,0,12,1,6,0]
 ; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm16
 ; AVX512F-NEXT:    vpermt2q %zmm2, %zmm15, %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm15, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm15, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, (%rsp) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm15, %zmm9
 ; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3
 ; AVX512F-NEXT:    vpermt2q %zmm21, %zmm15, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -5521,13 +5521,13 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2q %zmm7, %zmm9, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm9, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm9, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm9, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm9, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm9, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm9, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm3
 ; AVX512F-NEXT:    vpermt2q %zmm4, %zmm9, %zmm3
@@ -5538,13 +5538,13 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2q %zmm7, %zmm10, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm10, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm10, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm10, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm10, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm3
 ; AVX512F-NEXT:    vpermt2q %zmm4, %zmm10, %zmm3
@@ -5559,20 +5559,20 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm11, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm11, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm11, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm11, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm11, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm11, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm3, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm3, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm0
 ; AVX512F-NEXT:    vpermt2q %zmm4, %zmm11, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -5959,27 +5959,27 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm19
 ; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm5
 ; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm6
 ; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm3
 ; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm2
 ; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm7
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [12,1,6,0,12,1,6,0]
 ; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm16
 ; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm15, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm15, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, (%rsp) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm9
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm15, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm3
 ; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm15, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -5989,13 +5989,13 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm9, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm9, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm9, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm9, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm9, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm3
 ; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm3
@@ -6006,13 +6006,13 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm3
 ; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm3
@@ -6027,20 +6027,20 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm3, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm3, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm0
 ; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm11, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
index 288b7219260c9..c71c688cd32bd 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
@@ -598,84 +598,85 @@ define void @load_i64_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-LABEL: load_i64_stride6_vf8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm4
 ; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm4
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,6,0,10,0,6,0,10]
-; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm6
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,6,0,10,0,6,0,10]
+; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,6,12,u>
 ; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    movb $56, %dil
 ; AVX512F-NEXT:    kmovw %edi, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [4,10,4,10,4,10,4,10]
-; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,6,12,0,0,6,12]
-; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm9
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,1,7,13,0,1,7,13]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm10
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,10,0,6,0,10,0,6]
-; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,11,1,7,0,11,1,7]
-; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm7, %zmm6, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm5, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [4,10,4,10,4,10,4,10]
+; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm7
 ; AVX512F-NEXT:    movb $-64, %dil
 ; AVX512F-NEXT:    kmovw %edi, %k2
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,7,0,11,1,7,0,11]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm7
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm13 = <1,7,13,u>
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm13 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [5,11,5,11,5,11,5,11]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm7, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm13 {%k2}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [10,4,10,4,10,4,10,4]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm6
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = <10,0,6,u>
-; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm8
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,0,11,1,7,0,11]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,7,13,u>
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [5,11,5,11,5,11,5,11]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k2}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [10,4,10,4,10,4,10,4]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm5, %zmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <10,0,6,u>
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
 ; AVX512F-NEXT:    movb $24, %dil
 ; AVX512F-NEXT:    kmovw %edi, %k2
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k2}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,6,12,0,0,6,12]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm8
 ; AVX512F-NEXT:    movb $-32, %dil
 ; AVX512F-NEXT:    kmovw %edi, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [11,5,11,5,11,5,11,5]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm6
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <11,1,7,u>
-; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm9 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k1}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [12,0,0,6,12,0,0,6]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm6
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm6, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [13,0,1,7,13,0,1,7]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm0
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [11,5,11,5,11,5,11,5]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm5, %zmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = <11,1,7,u>
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k2}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,1,7,13,0,1,7,13]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,10,0,6,0,10,0,6]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm8
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [12,0,0,6,12,0,0,6]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm5, %zmm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,10]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm12, %zmm11, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm11 {%k1}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,11,1,7,0,11,1,7]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm8
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [13,0,1,7,13,0,1,7]
+; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = [5,11]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, (%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -683,84 +684,85 @@ define void @load_i64_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-LABEL: load_i64_stride6_vf8:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm4
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,6,0,10,0,6,0,10]
-; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm6
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,6,0,10,0,6,0,10]
+; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,6,12,u>
 ; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    movb $56, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [4,10,4,10,4,10,4,10]
-; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,6,12,0,0,6,12]
-; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm9
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,1,7,13,0,1,7,13]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm10
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,10,0,6,0,10,0,6]
-; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,11,1,7,0,11,1,7]
-; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm5, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [4,10,4,10,4,10,4,10]
+; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm7
 ; AVX512BW-NEXT:    movb $-64, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k2
 ; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,7,0,11,1,7,0,11]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm7
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm13 = <1,7,13,u>
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm13 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [5,11,5,11,5,11,5,11]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm7, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm13 {%k2}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [10,4,10,4,10,4,10,4]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm6
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm8 = <10,0,6,u>
-; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm8
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,0,11,1,7,0,11]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,7,13,u>
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [5,11,5,11,5,11,5,11]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k2}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [10,4,10,4,10,4,10,4]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm9 = <10,0,6,u>
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
 ; AVX512BW-NEXT:    movb $24, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k2
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k2}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,0,6,12,0,0,6,12]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm8
 ; AVX512BW-NEXT:    movb $-32, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [11,5,11,5,11,5,11,5]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm6
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm9 = <11,1,7,u>
-; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm9 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k1}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [12,0,0,6,12,0,0,6]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm6
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm5, %zmm6, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [13,0,1,7,13,0,1,7]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm0
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm0, %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [11,5,11,5,11,5,11,5]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm10 = <11,1,7,u>
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k2}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,1,7,13,0,1,7,13]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,10,0,6,0,10,0,6]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm8
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [12,0,0,6,12,0,0,6]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm11
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,10]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm12, %zmm11, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm11 {%k1}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,11,1,7,0,11,1,7]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm8
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [13,0,1,7,13,0,1,7]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [5,11]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1443,12 +1445,12 @@ define void @load_i64_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-LABEL: load_i64_stride6_vf16:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm5
 ; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm8
 ; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm12
 ; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm9
@@ -1465,120 +1467,120 @@ define void @load_i64_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    movb $56, %dil
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm7 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [4,10,4,10,4,10,4,10]
-; AVX512F-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm16, %zmm15
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [4,10,4,10,4,10,4,10]
+; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm16
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm15, %zmm16
 ; AVX512F-NEXT:    movb $-64, %dil
 ; AVX512F-NEXT:    kmovw %edi, %k2
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm7 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm16, %zmm15
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm5, %zmm14
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm1, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm15
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm14
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
 ; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm6 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm6 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = [1,7,0,11,1,7,0,11]
-; AVX512F-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [1,7,0,11,1,7,0,11]
+; AVX512F-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm18, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm16, %zmm17
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm14 = <1,7,13,u>
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm15
 ; AVX512F-NEXT:    vpermt2q %zmm11, %zmm14, %zmm15
 ; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [5,11,5,11,5,11,5,11]
 ; AVX512F-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm19
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm17, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm15 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm17, %zmm19
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm5, %zmm18
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm1, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm14 {%k2}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [10,4,10,4,10,4,10,4]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm17, %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm15 {%k2}
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm17
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm16
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm14 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm14 {%k2}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [10,4,10,4,10,4,10,4]
+; AVX512F-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm18, %zmm19
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <10,0,6,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm16, %zmm17
+; AVX512F-NEXT:    movb $24, %dil
+; AVX512F-NEXT:    kmovw %edi, %k2
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm17 {%k2}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,6,12,0,0,6,12]
+; AVX512F-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm19, %zmm20
+; AVX512F-NEXT:    movb $-32, %dil
+; AVX512F-NEXT:    kmovw %edi, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm17 {%k1}
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm19
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm18
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm16 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [11,5,11,5,11,5,11,5]
 ; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm21
 ; AVX512F-NEXT:    vpermt2q %zmm13, %zmm20, %zmm21
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <10,0,6,u>
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <11,1,7,u>
 ; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm19
 ; AVX512F-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
-; AVX512F-NEXT:    movb $24, %dil
-; AVX512F-NEXT:    kmovw %edi, %k2
 ; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm19 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,0,6,12,0,0,6,12]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,1,7,13,0,1,7,13]
 ; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm22
 ; AVX512F-NEXT:    vpermt2q %zmm8, %zmm21, %zmm22
-; AVX512F-NEXT:    movb $-32, %dil
-; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm19 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm0, %zmm4, %zmm21
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm3, %zmm20
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm18
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm21
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm20
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm18
 ; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm18 {%k2}
 ; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm18 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [11,5,11,5,11,5,11,5]
-; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [0,10,0,6,0,10,0,6]
+; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [12,0,0,6,12,0,0,6]
+; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm23
 ; AVX512F-NEXT:    vpermt2q %zmm13, %zmm22, %zmm23
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <11,1,7,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm21
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm20, %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm21 {%k2}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [0,1,7,13,0,1,7,13]
-; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm23, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm21 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm0, %zmm4, %zmm23
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm3, %zmm22
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm20 {%k1}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,10,0,6,0,10,0,6]
-; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm22, %zmm23
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [12,0,0,6,12,0,0,6]
-; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm24, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm16, %zmm26
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm26, %zmm25, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm25 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm3, %zmm24
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm1, %zmm16
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm16, %zmm24, %zmm16
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm0, %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k1}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,11,1,7,0,11,1,7]
-; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm22, %zmm8
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm24 = [4,10]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm24, %zmm25
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm25, %zmm23, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k1}
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm2, %zmm20
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm22
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm24
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm24, %zmm22, %zmm21
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm21 {%k1}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [0,11,1,7,0,11,1,7]
+; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm20, %zmm8
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [13,0,1,7,13,0,1,7]
 ; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermt2q %zmm13, %zmm12, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm17, %zmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = [5,11]
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm13, %zmm9
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm9, %zmm10, %zmm9
 ; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm12, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm17, %zmm1
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm3, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm22, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm20, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm12, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm13, %zmm0
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 %zmm6, 64(%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm14, 64(%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm15, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm19, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, 64(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm21, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm16, 64(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm25, (%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, 64(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm21, 64(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -1586,12 +1588,12 @@ define void @load_i64_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-LABEL: load_i64_stride6_vf16:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm5
 ; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm8
 ; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm12
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm9
@@ -1608,120 +1610,120 @@ define void @load_i64_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    movb $56, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm7 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [4,10,4,10,4,10,4,10]
-; AVX512BW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm16, %zmm15
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [4,10,4,10,4,10,4,10]
+; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm16
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm15, %zmm16
 ; AVX512BW-NEXT:    movb $-64, %dil
 ; AVX512BW-NEXT:    kmovd %edi, %k2
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm7 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm15
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm14
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm15
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm14
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
 ; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm6 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm6 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = [1,7,0,11,1,7,0,11]
-; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [1,7,0,11,1,7,0,11]
+; AVX512BW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm18, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm16, %zmm17
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm14 = <1,7,13,u>
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm15
 ; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm14, %zmm15
 ; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [5,11,5,11,5,11,5,11]
 ; AVX512BW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm19
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm17, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm15 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm17, %zmm19
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm18
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm14 {%k2}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [10,4,10,4,10,4,10,4]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm17, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm15 {%k2}
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm17
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm16
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm14 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm14 {%k2}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [10,4,10,4,10,4,10,4]
+; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm19
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm18, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <10,0,6,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm16, %zmm17
+; AVX512BW-NEXT:    movb $24, %dil
+; AVX512BW-NEXT:    kmovd %edi, %k2
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm17 {%k2}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,0,6,12,0,0,6,12]
+; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm19, %zmm20
+; AVX512BW-NEXT:    movb $-32, %dil
+; AVX512BW-NEXT:    kmovd %edi, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm17 {%k1}
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm19
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm18
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm16 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [11,5,11,5,11,5,11,5]
 ; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm21
 ; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm20, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <10,0,6,u>
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <11,1,7,u>
 ; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm19
 ; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
-; AVX512BW-NEXT:    movb $24, %dil
-; AVX512BW-NEXT:    kmovd %edi, %k2
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm19 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,0,6,12,0,0,6,12]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,1,7,13,0,1,7,13]
 ; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm22
 ; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm21, %zmm22
-; AVX512BW-NEXT:    movb $-32, %dil
-; AVX512BW-NEXT:    kmovd %edi, %k1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm19 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm4, %zmm21
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm3, %zmm20
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm18
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm21
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm20
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm18
 ; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm18 {%k2}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm18 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [11,5,11,5,11,5,11,5]
-; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [0,10,0,6,0,10,0,6]
+; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm21
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [12,0,0,6,12,0,0,6]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm23
 ; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm22, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <11,1,7,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm21
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm20, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm21 {%k2}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [0,1,7,13,0,1,7,13]
-; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm23, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm21 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm4, %zmm23
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm3, %zmm22
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm20 {%k1}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,10,0,6,0,10,0,6]
-; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm23
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm22, %zmm23
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [12,0,0,6,12,0,0,6]
-; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm25
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm24, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm16, %zmm26
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm26, %zmm25, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm25 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm3, %zmm24
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm16
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm16, %zmm24, %zmm16
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm0, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k1}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,11,1,7,0,11,1,7]
-; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm22, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm24 = [4,10]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm24, %zmm25
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm25, %zmm23, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k1}
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm2, %zmm20
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm22
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm24
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm24, %zmm22, %zmm21
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm21 {%k1}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [0,11,1,7,0,11,1,7]
+; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm20, %zmm8
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [13,0,1,7,13,0,1,7]
 ; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm12, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm17, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [5,11]
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm13, %zmm9
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm9, %zmm10, %zmm9
 ; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm12, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm17, %zmm1
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm3, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm22, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm20, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm12, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm6, 64(%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm14, 64(%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm15, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, 64(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, 64(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, (%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, 64(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -3230,351 +3232,344 @@ define void @load_i64_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-LABEL: load_i64_stride6_vf32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    subq $2632, %rsp # imm = 0xA48
-; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm21
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm18
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm25
+; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm21
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm20
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm18
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm27
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm25
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,6,0,10,0,6,0,10]
 ; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [4,10,4,10,4,10,4,10]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm6, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm5, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = <0,6,12,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm6, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm7, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm6, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm21, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,7,0,11,1,7,0,11]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm6, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm6
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,7,13,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm8, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm8, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm21, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm5, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm6, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm21, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [5,11,5,11,5,11,5,11]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,7,0,11,1,7,0,11]
+; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [10,4,10,4,10,4,10,4]
-; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm1, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm21, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,6,12,0,0,6,12]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm1, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm2, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [11,5,11,5,11,5,11,5]
-; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm1, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm21, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,7,13,0,1,7,13]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm1, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm2, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [12,0,0,6,12,0,0,6]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm8, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [13,0,1,7,13,0,1,7]
-; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm8, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm1, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm8, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm2, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm1, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = <10,0,6,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm11, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm31 = <11,1,7,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm31, %zmm0
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm21, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [4,10,4,10,4,10,4,10]
-; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,10,0,6,0,10,0,6]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm5, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm8, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [5,11,5,11,5,11,5,11]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm25
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm29
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm29
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,11,1,7,0,11,1,7]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm0, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm11, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm8, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm31, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm8, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm3
+; AVX512F-NEXT:    vpermi2q %zmm21, %zmm5, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm7, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm25
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm27
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm5, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm30
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm26
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm5, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm5, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm15
-; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm5, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm4
-; AVX512F-NEXT:    vpermi2q %zmm19, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm10, %zmm12
-; AVX512F-NEXT:    vpermi2q %zmm19, %zmm4, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm11, %zmm13
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm19, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm31, %zmm14
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm19, %zmm31
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm19, %zmm4, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm19
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm0, %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm18
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,6,12,0,0,6,12]
-; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm1, %zmm24
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,7,13,0,1,7,13]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm2, %zmm0
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [10,4,10,4,10,4,10,4]
+; AVX512F-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm26, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,10,0,6,0,10,0,6]
-; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm3, %zmm0
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [11,5,11,5,11,5,11,5]
+; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm27, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,11,1,7,0,11,1,7]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [12,0,0,6,12,0,0,6]
+; AVX512F-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm29, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [13,0,1,7,13,0,1,7]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm0, %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm1, %zmm26
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm2, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm28
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm3, %zmm28
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm0, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm27
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm1, %zmm27
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm21, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm2, %zmm10
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm21, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm3, %zmm16
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm6, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm0, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm31
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm6, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm7, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm27, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm29, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm23
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm6, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm18
+; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm2
+; AVX512F-NEXT:    vpermi2q %zmm18, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm7, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm18, %zmm2, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm18, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm27, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm18, %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm29, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm18, %zmm29
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm23
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm19
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,6,12,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm20
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,7,13,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm21
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = <10,0,6,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm6, %zmm22
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = <11,1,7,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm7, %zmm24
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm17 = [4,10]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm17, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm30 = [5,11]
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm30, %zmm19
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm6, %zmm15
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm7, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm28
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm17, %zmm28
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm30, %zmm4
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm2, %zmm8
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm3, %zmm10
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm6, %zmm11
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm7, %zmm12
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm25
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm17, %zmm25
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm30, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm30, %zmm5
 ; AVX512F-NEXT:    movb $56, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
 ; AVX512F-NEXT:    movb $-64, %al
 ; AVX512F-NEXT:    kmovw %eax, %k2
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm21 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm19 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm0 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
 ; AVX512F-NEXT:    movb $24, %al
 ; AVX512F-NEXT:    kmovw %eax, %k2
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
 ; AVX512F-NEXT:    movb $-32, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm13 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm18 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm18 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm24
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm14 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm6 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm10, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm25, %zmm10, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm7 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm25, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm9, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm8, %zmm6, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm20, 192(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm15, 128(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm7, 64(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm29, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, 192(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm19, (%rdx)
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm28, %zmm25, %zmm25
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm25 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm17, %zmm29, %zmm17
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm17 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm19, %zmm26, %zmm19
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm19 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm31, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm4 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm18, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k1}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm23, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 192(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, 128(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 64(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm14, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 192(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, (%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm21, 64(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm12, 128(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 192(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, 128(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 192(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm15, (%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm22, 64(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, 128(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm31, 192(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm23, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm14, 128(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, 192(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm2, (%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, 128(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 192(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm24, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, 128(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, 192(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm25, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, 64(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, 128(%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 128(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, 64(%rax)
 ; AVX512F-NEXT:    addq $2632, %rsp # imm = 0xA48
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -3582,351 +3577,344 @@ define void @load_i64_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-LABEL: load_i64_stride6_vf32:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    subq $2632, %rsp # imm = 0xA48
-; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm18
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm25
+; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm21
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm20
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm18
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm27
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm25
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,6,0,10,0,6,0,10]
 ; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [4,10,4,10,4,10,4,10]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm5, %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <0,6,12,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm7, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm21, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,7,0,11,1,7,0,11]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm6
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,7,13,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm8, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm21, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm5, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm21, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [5,11,5,11,5,11,5,11]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,7,0,11,1,7,0,11]
+; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [10,4,10,4,10,4,10,4]
-; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm21, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,0,6,12,0,0,6,12]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [11,5,11,5,11,5,11,5]
-; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm21, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,7,13,0,1,7,13]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [12,0,0,6,12,0,0,6]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [13,0,1,7,13,0,1,7]
-; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm8, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm1, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm8, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm2, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm10, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm1, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm11 = <10,0,6,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm31 = <11,1,7,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm31, %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm21, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [4,10,4,10,4,10,4,10]
-; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,10,0,6,0,10,0,6]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm5, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm8, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [5,11,5,11,5,11,5,11]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm25
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,11,1,7,0,11,1,7]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm11, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm8, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm31, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm8, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm3
+; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm5, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm5, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm5, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm5, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm4
-; AVX512BW-NEXT:    vpermi2q %zmm19, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm12
-; AVX512BW-NEXT:    vpermi2q %zmm19, %zmm4, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm13
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm19, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm31, %zmm14
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm19, %zmm31
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm19, %zmm4, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm19
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm0, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm18
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,6,12,0,0,6,12]
-; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm1, %zmm24
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,7,13,0,1,7,13]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm2, %zmm0
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [10,4,10,4,10,4,10,4]
+; AVX512BW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm26, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,10,0,6,0,10,0,6]
-; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm3, %zmm0
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [11,5,11,5,11,5,11,5]
+; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm27, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,11,1,7,0,11,1,7]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [12,0,0,6,12,0,0,6]
+; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm29, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [13,0,1,7,13,0,1,7]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm30
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm23
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm28
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm28
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm27
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm27
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm21, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm2, %zmm10
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm21, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm3, %zmm16
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm31
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm6, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm7, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm26, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm27, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm29, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm6, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm18
+; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm2
+; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm7, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm2, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm26, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm18, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm27, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm18, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm29, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm18, %zmm29
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm19
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,6,12,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm20
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,7,13,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm21
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm21
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = <10,0,6,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm6, %zmm22
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <11,1,7,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm7, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = [4,10]
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm17, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm30 = [5,11]
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm30, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm14
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm7, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm28
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm17, %zmm28
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm30, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm3, %zmm10
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm11
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm12
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm17, %zmm25
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm30, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm30, %zmm5
 ; AVX512BW-NEXT:    movb $56, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
 ; AVX512BW-NEXT:    movb $-64, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k2
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm21 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm19 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm0 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
 ; AVX512BW-NEXT:    movb $24, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k2
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
 ; AVX512BW-NEXT:    movb $-32, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm13 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm18 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm18 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm14 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm6 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm5, %zmm10, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm25, %zmm10, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm7 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm25, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm9, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm8, %zmm6, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, 192(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, 128(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm7, 64(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 192(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, (%rdx)
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm28, %zmm25, %zmm25
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm25 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm17, %zmm29, %zmm17
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm17 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm19, %zmm26, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm19 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm31, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm4 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm18, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k1}
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm5, %zmm23, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 192(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, 128(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 64(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 192(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, 64(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, 128(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 192(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, 128(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 192(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, (%rcx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm22, 64(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 128(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, 192(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, 128(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 192(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, 128(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 192(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, 128(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, 192(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, 64(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, 128(%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 128(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, 64(%rax)
 ; AVX512BW-NEXT:    addq $2632, %rsp # imm = 0xA48
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -7117,755 +7105,748 @@ define void @load_i64_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-LABEL: load_i64_stride6_vf64:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    subq $7240, %rsp # imm = 0x1C48
-; AVX512F-NEXT:    vmovdqa64 2048(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm26
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,6,0,10,0,6,0,10]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm7, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2240(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm26
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm1
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [4,10,4,10,4,10,4,10]
+; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm30, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm30, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm7, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm30, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm30, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,0,11,1,7,0,11]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [5,11,5,11,5,11,5,11]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm8, %zmm9
 ; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm8, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm8, %zmm9
 ; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm8, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm8, %zmm9
 ; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm8, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm8, %zmm9
 ; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [10,4,10,4,10,4,10,4]
-; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,6,12,0,0,6,12]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm9, %zmm10
 ; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm9, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm9, %zmm10
 ; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm9, %zmm10
 ; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm9, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm9, %zmm10
 ; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [11,5,11,5,11,5,11,5]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,1,7,13,0,1,7,13]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm10, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm10, %zmm11
 ; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm10, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm10, %zmm11
 ; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm10, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm10, %zmm11
 ; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm10, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm10, %zmm11
 ; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [12,0,0,6,12,0,0,6]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,10,0,6,0,10,0,6]
 ; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm11, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm11, %zmm13
 ; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [13,0,1,7,13,0,1,7]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm12
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,11,1,7,0,11,1,7]
+; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm12
 ; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm11, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm2, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm11, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
 ; AVX512F-NEXT:    vpermt2q %zmm0, %zmm11, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm0
 ; AVX512F-NEXT:    vpermt2q %zmm26, %zmm11, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm2, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2112(%rdi), %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm3, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2176(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm30, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm8, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm8, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm9, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm10, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
 ; AVX512F-NEXT:    vpermt2q %zmm0, %zmm11, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm10, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm11, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm10, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm11, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2432(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 2496(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 2624(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 2560(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm30, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2816(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 2880(%rdi), %zmm1
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm1, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 3008(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 2944(%rdi), %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm1, %zmm30
+; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm8, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm8, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermi2q %zmm4, %zmm1, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm9, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm4, %zmm9
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm1, %zmm9
 ; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm10, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm4, %zmm10
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm1, %zmm10
 ; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5
 ; AVX512F-NEXT:    vpermt2q %zmm0, %zmm11, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermi2q %zmm1, %zmm4, %zmm11
 ; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = <0,6,12,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <1,7,13,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = <10,0,6,u>
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm1
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,6,0,10,0,6,0,10]
+; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm10, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm22, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = <11,1,7,u>
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [1,7,0,11,1,7,0,11]
+; AVX512F-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm11, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm28, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [4,10,4,10,4,10,4,10]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [10,4,10,4,10,4,10,4]
+; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [5,11,5,11,5,11,5,11]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [11,5,11,5,11,5,11,5]
+; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [12,0,0,6,12,0,0,6]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [13,0,1,7,13,0,1,7]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm2
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm2
+; AVX512F-NEXT:    vmovdqa64 2048(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 2112(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm10, %zmm2
+; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2432(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 2496(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm11, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm30
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm26
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm21
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm24
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm28
-; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm25
-; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2240(%rdi), %zmm23
-; AVX512F-NEXT:    vmovdqa64 2176(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2624(%rdi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 2560(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2368(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 2304(%rdi), %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm29
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm29
-; AVX512F-NEXT:    vmovdqa64 3008(%rdi), %zmm14
-; AVX512F-NEXT:    vmovdqa64 2944(%rdi), %zmm31
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm4, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm22, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2752(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 2688(%rdi), %zmm12
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm12, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm12, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm10, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm12, %zmm2, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm11, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm12, %zmm2, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm12, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,6,12,0,0,6,12]
-; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm1, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,7,13,0,1,7,13]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,10,0,6,0,10,0,6]
-; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm3, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,11,1,7,0,11,1,7]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm1, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm2, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm3, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm0, %zmm26
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm27
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm27
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm2, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm3, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm28
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm1, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqa64 2816(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 2880(%rdi), %zmm2
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm2, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm28, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm3, %zmm6
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm2, %zmm28
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm1, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm19
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm2, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm30
+; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm0, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm1, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm2, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm3, %zmm6
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm25
+; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm1, %zmm6
-; AVX512F-NEXT:    vpermi2q %zmm14, %zmm31, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm2, %zmm7
-; AVX512F-NEXT:    vpermi2q %zmm14, %zmm31, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm16
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm3, %zmm16
-; AVX512F-NEXT:    vpermi2q %zmm31, %zmm14, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm0, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm20
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm26
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,6,12,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,7,13,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = <10,0,6,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = <11,1,7,u>
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,10]
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm25 = [5,11]
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm26
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm31
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm31
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm30
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm30
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm27
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm29, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm29, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm29
+; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm20
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm23
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm4, %zmm24
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm18
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm16
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm6, %zmm21
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm25, %zmm7
+; AVX512F-NEXT:    vmovdqa64 2368(%rdi), %zmm12
+; AVX512F-NEXT:    vmovdqa64 2304(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm2, %zmm10
+; AVX512F-NEXT:    vmovdqa64 2752(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 2688(%rdi), %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm3, %zmm11
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm4, %zmm13
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm15
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm6, %zmm15
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm8, %zmm5
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm25, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm25, %zmm9
 ; AVX512F-NEXT:    movb $56, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
 ; AVX512F-NEXT:    movb $-64, %al
 ; AVX512F-NEXT:    kmovw %eax, %k2
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k2}
+; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k2}
+; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k2}
+; AVX512F-NEXT:    vmovdqu64 %zmm30, (%rsp) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm2 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm3 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
 ; AVX512F-NEXT:    movb $24, %al
 ; AVX512F-NEXT:    kmovw %eax, %k2
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
 ; AVX512F-NEXT:    movb $-32, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm31 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm27 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm28 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm22 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 16-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm6 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm7 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm7 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm13 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm2, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm16 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm16 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm18 # 16-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm18 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm30, %zmm19, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm19 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm12, %zmm21, %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm12 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm22, %zmm14, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm8, 448(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm29, 384(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 320(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 256(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 192(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 128(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 64(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, 448(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 256(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 320(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 128(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 192(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, (%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 64(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 384(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, 448(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 256(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 320(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 128(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 192(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, (%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 64(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 384(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 448(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm27, 256(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm31, 320(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 128(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 192(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, (%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 64(%r8)
-; AVX512F-NEXT:    vmovups (%rsp), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm8, 384(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 448(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm15, 256(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, 320(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 128(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 192(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, (%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm0, 384(%r9)
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm8, %zmm30, %zmm8
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm26, %zmm30, %zmm26
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm26 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm31, %zmm30, %zmm31
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm31 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm27, %zmm30, %zmm27
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm27 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm29, %zmm30, %zmm29
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm29 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm18, %zmm30, %zmm18
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm18 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm7, %zmm30, %zmm7
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm30, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 448(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, 384(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 320(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm14, 256(%rsi)
+; AVX512F-NEXT:    vmovups (%rsp), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 192(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 128(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 64(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 448(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, 256(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, 320(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 128(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 192(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, (%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 64(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, 384(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 448(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, 256(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm24, 320(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 128(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 192(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, (%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 64(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, 384(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 448(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm21, 256(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 320(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 128(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 192(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, (%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm15, 384(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, 448(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, 256(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, 320(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, 128(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, 192(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm28, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm25, 64(%r9)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, 384(%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm14, 384(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm12, 448(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm19, 256(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 384(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 448(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 256(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm18, 320(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, 128(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm16, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm29, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm27, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm31, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm26, 64(%rax)
 ; AVX512F-NEXT:    addq $7240, %rsp # imm = 0x1C48
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -7873,755 +7854,748 @@ define void @load_i64_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-LABEL: load_i64_stride6_vf64:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    subq $7240, %rsp # imm = 0x1C48
-; AVX512BW-NEXT:    vmovdqa64 2048(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,6,0,10,0,6,0,10]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm7, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2240(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm26
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm1
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [4,10,4,10,4,10,4,10]
+; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm30, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm30, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,7,0,11,1,7,0,11]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [5,11,5,11,5,11,5,11]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm8, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm8, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm8, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm8, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm8, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [10,4,10,4,10,4,10,4]
-; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm10
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,6,12,0,0,6,12]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm9, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm9, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm9, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm9, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm9, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [11,5,11,5,11,5,11,5]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,1,7,13,0,1,7,13]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm10, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm10, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [12,0,0,6,12,0,0,6]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,10,0,6,0,10,0,6]
 ; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm13
 ; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [13,0,1,7,13,0,1,7]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm12
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,11,1,7,0,11,1,7]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm2, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
 ; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm0
 ; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm2, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2112(%rdi), %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm3, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2176(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm30, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm9, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
 ; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm30, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2432(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 2496(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 2624(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 2560(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2816(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 2880(%rdi), %zmm1
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 3008(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 2944(%rdi), %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm30
+; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm9, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm9, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm9
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm10
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
 ; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm8 = <0,6,12,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm9 = <1,7,13,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm10 = <10,0,6,u>
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm1
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,6,0,10,0,6,0,10]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm22, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm11 = <11,1,7,u>
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [1,7,0,11,1,7,0,11]
+; AVX512BW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [4,10,4,10,4,10,4,10]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [10,4,10,4,10,4,10,4]
+; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm30, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [5,11,5,11,5,11,5,11]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [11,5,11,5,11,5,11,5]
+; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [12,0,0,6,12,0,0,6]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [13,0,1,7,13,0,1,7]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 2048(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 2112(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm10, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm22, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm30, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2432(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 2496(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm11, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm22, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm30
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm24
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm28
-; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm25
-; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2240(%rdi), %zmm23
-; AVX512BW-NEXT:    vmovdqa64 2176(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2624(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 2560(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2368(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 2304(%rdi), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm29
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 3008(%rdi), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 2944(%rdi), %zmm31
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2752(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 2688(%rdi), %zmm12
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm10, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm12, %zmm2, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm12, %zmm2, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,6,12,0,0,6,12]
-; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,7,13,0,1,7,13]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,10,0,6,0,10,0,6]
-; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,11,1,7,0,11,1,7]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm0, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm2, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm3, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm27
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm2, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm3, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm28
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 2816(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 2880(%rdi), %zmm2
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm2, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm28, %zmm6
 ; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm3, %zmm6
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm2, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm30, %zmm6
 ; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm1, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm19
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm30
+; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm6
 ; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm1, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm2, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm3, %zmm6
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm25
+; AVX512BW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm6
 ; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm31, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm7
-; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm31, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm16
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm16
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm14, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm20
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm26
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,6,12,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,7,13,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm4 = <10,0,6,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = <11,1,7,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,10]
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = [5,11]
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm31
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm31
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm30
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm23
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm24
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm4, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm16
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm19
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm21
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm21
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 2368(%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 2304(%rdi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 2752(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 2688(%rdi), %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm3, %zmm11
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm4, %zmm13
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm15
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm15
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm8, %zmm5
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm25, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm25, %zmm9
 ; AVX512BW-NEXT:    movb $56, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
 ; AVX512BW-NEXT:    movb $-64, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k2
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm30, (%rsp) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm2 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm3 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
 ; AVX512BW-NEXT:    movb $24, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k2
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
 ; AVX512BW-NEXT:    movb $-32, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm31 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm27 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm5, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm28 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm22 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 16-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm6 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm7 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm7 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm13 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm15 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm2, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm16 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm16 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm18 # 16-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm18 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm30, %zmm19, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm19 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm12, %zmm21, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm12 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm22, %zmm14, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, 448(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, 384(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 320(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 256(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 192(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 128(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 64(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, 448(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 256(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 320(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 128(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 192(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, (%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 64(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 384(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, 448(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 256(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 320(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 128(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 192(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, (%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 64(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 384(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 448(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, 256(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, 320(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 128(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 192(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, (%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 64(%r8)
-; AVX512BW-NEXT:    vmovups (%rsp), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm8, 384(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 448(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, 256(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 320(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 128(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 192(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, 384(%r9)
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm8, %zmm30, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm26, %zmm30, %zmm26
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm26 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm31, %zmm30, %zmm31
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm31 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm27, %zmm30, %zmm27
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm27 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm29, %zmm30, %zmm29
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm29 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm18, %zmm30, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm18 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm7, %zmm30, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm30, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 448(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, 384(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 320(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, 256(%rsi)
+; AVX512BW-NEXT:    vmovups (%rsp), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 192(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 128(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 64(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 448(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, 256(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, 320(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 128(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 192(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, (%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 64(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, 384(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 448(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, 256(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, 320(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 128(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 192(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, (%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, 384(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 448(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, 256(%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 320(%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 128(%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 192(%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, (%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, 384(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, 448(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, 256(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, 320(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, 128(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, 192(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, 64(%r9)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, 384(%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, 384(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, 448(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, 256(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 384(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 448(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 256(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm18, 320(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 128(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, 64(%rax)
 ; AVX512BW-NEXT:    addq $7240, %rsp # imm = 0x1C48
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
index 7d9c056716cee..80e41f6daac0f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
@@ -751,111 +751,113 @@ define void @load_i64_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512F-LABEL: load_i64_stride7_vf8:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm3
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm2
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,7,14,u>
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm0
-; AVX512F-NEXT:    movb $24, %al
-; AVX512F-NEXT:    kmovw %eax, %k2
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm7[4,5,4,5],zmm6[4,5,4,5]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT:    movb $24, %r11b
+; AVX512F-NEXT:    kmovw %r11d, %k2
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm5[4,5,4,5],zmm3[4,5,4,5]
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [10,3,10,3,10,3,10,3]
 ; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm5, %zmm8
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm8
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,5,6,9,0,5,6,9]
 ; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm8, %zmm9
-; AVX512F-NEXT:    movb $-32, %al
-; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm8, %zmm9
+; AVX512F-NEXT:    movb $-32, %r11b
+; AVX512F-NEXT:    kmovw %r11d, %k1
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [11,4,11,4,11,4,11,4]
 ; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm5, %zmm8
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,5,6,10,0,5,6,10]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm8, %zmm10
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [13,6,13,6,13,6,13,6]
-; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [14,0,0,7,14,0,0,7]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm8
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,5,6,10,0,5,6,10]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm8, %zmm9
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,6,13,6,13,6,13,6]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm5, %zmm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = <9,0,7,u>
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm1, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [12,5,12,5,12,5,12,5]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm9
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,5,6,11,0,5,6,11]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm9, %zmm10
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [14,0,0,7,14,0,0,7]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm5, %zmm9
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm11
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0,1],mem[2,3]
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm11, %zmm8, %zmm8
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [7,0,9,0,7,0,9,0]
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm11, %zmm9, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,6,13,6,13,6,13,6]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm10
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,5,6,12,0,5,6,12]
 ; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm10, %zmm11
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [7,0,9,0,7,0,9,0]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm3, %zmm10
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm12
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm12 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm15
-; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm13
-; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm12
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm12[0,1,2,3,4,5],ymm13[6,7]
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,11]
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm14
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [4,11,4,11]
-; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm9, %zmm7
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = <9,0,7,u>
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm2, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm6 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [12,5,12,5,12,5,12,5]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,5,6,11,0,5,6,11]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm7, %zmm10
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,5,6,12,0,5,6,12]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm9, %zmm7
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm11, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm9 {%k1}
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,7,14,0,0,7,14,0]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [4,5,6,13,4,5,6,13]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm7, %zmm10
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm10, %zmm7
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [9,0,7,0,9,0,7,0]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm4, %zmm10
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,5,6,14,4,5,6,14]
+; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm12, %zmm10, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k1}
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,7,14,0,0,7,14,0]
 ; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm10, %zmm11
-; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm10
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm10 = ymm12[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = [5,12]
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm12
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,9,2,9,2,9,2,9]
-; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm5, %zmm11
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm11, %zmm4
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,13]
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm11
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [4,5,6,13,4,5,6,13]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm11, %zmm12
+; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm11
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3,4,5],ymm11[6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,11]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm12, %zmm11
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [9,0,7,0,9,0,7,0]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm7, %zmm6, %zmm12
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,5,6,14,4,5,6,14]
+; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm12, %zmm14
+; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm12
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm12 = ymm13[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = [5,12]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm14, %zmm12
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [2,9,2,9,2,9,2,9]
+; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm7, %zmm13
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,5,8,15,4,5,8,15]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm13, %zmm6
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [4,11,4,11]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [6,13]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm6, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, (%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, (%r10)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, (%r10)
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -864,109 +866,111 @@ define void @load_i64_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm2
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,7,14,u>
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm0
 ; AVX512BW-NEXT:    movb $24, %r11b
 ; AVX512BW-NEXT:    kmovd %r11d, %k2
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm7[4,5,4,5],zmm6[4,5,4,5]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm5[4,5,4,5],zmm3[4,5,4,5]
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [10,3,10,3,10,3,10,3]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm8
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm8
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,5,6,9,0,5,6,9]
 ; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm8, %zmm9
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm8, %zmm9
 ; AVX512BW-NEXT:    movb $-32, %r11b
 ; AVX512BW-NEXT:    kmovd %r11d, %k1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [11,4,11,4,11,4,11,4]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm8
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,5,6,10,0,5,6,10]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm8, %zmm10
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [13,6,13,6,13,6,13,6]
-; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [14,0,0,7,14,0,0,7]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm8
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,5,6,10,0,5,6,10]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm8, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,6,13,6,13,6,13,6]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm8 = <9,0,7,u>
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [12,5,12,5,12,5,12,5]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm9
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,5,6,11,0,5,6,11]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm9, %zmm10
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [14,0,0,7,14,0,0,7]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm9
 ; AVX512BW-NEXT:    vmovdqa 16(%rdi), %xmm11
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0,1],mem[2,3]
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm11, %zmm8, %zmm8
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [7,0,9,0,7,0,9,0]
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm11, %zmm9, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,6,13,6,13,6,13,6]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm10
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,5,6,12,0,5,6,12]
 ; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm13
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm12
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm12[0,1,2,3,4,5],ymm13[6,7]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,11]
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm14
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [4,11,4,11]
-; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm9, %zmm7
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = <9,0,7,u>
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm6 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [12,5,12,5,12,5,12,5]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,5,6,11,0,5,6,11]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm7, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm9
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,5,6,12,0,5,6,12]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm9, %zmm7
-; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm9, %zmm11, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm9 {%k1}
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,7,14,0,0,7,14,0]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [4,5,6,13,4,5,6,13]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm7, %zmm10
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm10, %zmm7
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [9,0,7,0,9,0,7,0]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm10, %zmm11
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [7,0,9,0,7,0,9,0]
 ; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm10
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,5,6,14,4,5,6,14]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm3, %zmm10
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm12
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm12 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm12, %zmm10, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k1}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,7,14,0,0,7,14,0]
 ; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm10, %zmm11
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm10
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm10 = ymm12[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [5,12]
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm12
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,9,2,9,2,9,2,9]
-; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm11
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm11, %zmm4
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,13]
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm11
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [4,5,6,13,4,5,6,13]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm11, %zmm12
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm11
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3,4,5],ymm11[6,7]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,11]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm12, %zmm11
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [9,0,7,0,9,0,7,0]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm12
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,5,6,14,4,5,6,14]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm12, %zmm14
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm12
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm12 = ymm13[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [5,12]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm14, %zmm12
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [2,9,2,9,2,9,2,9]
+; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm13
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,5,8,15,4,5,8,15]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm13, %zmm6
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [4,11,4,11]
+; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [6,13]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm6, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%r10)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%r10)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
@@ -1725,12 +1729,12 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm28
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm30
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm6
 ; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm25
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm3
 ; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm24
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm2
 ; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm14
 ; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm15
 ; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm9
@@ -1754,9 +1758,9 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    movb $-32, %r11b
 ; AVX512F-NEXT:    kmovw %r11d, %k1
 ; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm29 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm24, %zmm6, %zmm7
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm2[4,5,4,5],zmm25[4,5,4,5]
-; AVX512F-NEXT:    vpermi2q %zmm30, %zmm0, %zmm16
+; AVX512F-NEXT:    vpermi2q %zmm24, %zmm2, %zmm7
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm3[4,5,4,5],zmm25[4,5,4,5]
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm0, %zmm16
 ; AVX512F-NEXT:    vpermt2q %zmm28, %zmm18, %zmm16
 ; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k1}
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [11,4,11,4,11,4,11,4]
@@ -1766,21 +1770,20 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [0,5,6,10,0,5,6,10]
 ; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermt2q %zmm14, %zmm20, %zmm19
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [13,6,13,6,13,6,13,6]
-; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm21
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm22, %zmm21
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,6,13,6,13,6,13,6]
+; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm21, %zmm22
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <9,0,7,u>
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm17
 ; AVX512F-NEXT:    vpermt2q %zmm1, %zmm16, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm17 {%k2}
 ; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm17 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm30, %zmm0, %zmm18
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm0, %zmm18
 ; AVX512F-NEXT:    vpermt2q %zmm28, %zmm20, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm19
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm22, %zmm19
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm24, %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
+; AVX512F-NEXT:    vpermi2q %zmm25, %zmm3, %zmm21
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm24, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm16 {%k2}
 ; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm16 {%k1}
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
 ; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -1789,26 +1792,29 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,5,6,11,0,5,6,11]
 ; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermt2q %zmm14, %zmm21, %zmm19
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [14,0,0,7,14,0,0,7]
-; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [14,0,0,7,14,0,0,7]
+; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm23, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm22, %zmm18
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm4
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm18, %zmm18
 ; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm18 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm30, %zmm0, %zmm20
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm0, %zmm20
 ; AVX512F-NEXT:    vpermt2q %zmm28, %zmm21, %zmm20
-; AVX512F-NEXT:    vpermi2q %zmm25, %zmm2, %zmm23
+; AVX512F-NEXT:    vpermi2q %zmm25, %zmm3, %zmm22
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 464(%rdi), %xmm4
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm23, %zmm19
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm22, %zmm19
 ; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm19 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm22, %zmm4
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,5,6,12,0,5,6,12]
-; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm21, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [13,6,13,6,13,6,13,6]
+; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm4, %zmm21
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,5,6,12,0,5,6,12]
+; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm22, %zmm21
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [7,0,9,0,7,0,9,0]
 ; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm20
@@ -1817,15 +1823,15 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm5 = mem[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm5
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm20, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm30, %zmm0, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm21, %zmm22
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm25, %zmm23
-; AVX512F-NEXT:    vmovdqa 512(%rdi), %ymm4
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm4 = mem[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm4, %xmm4
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm4, %zmm23, %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm21 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm20 {%k1}
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm22, %zmm4
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm25, %zmm23
+; AVX512F-NEXT:    vmovdqa 512(%rdi), %ymm5
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm5 = mem[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm23, %zmm21
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k1}
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,7,14,0,0,7,14,0]
 ; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm5
@@ -1841,12 +1847,12 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2q %zmm26, %zmm3, %zmm11
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm5, %zmm22
-; AVX512F-NEXT:    vpermi2q %zmm30, %zmm0, %zmm4
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm0, %zmm4
 ; AVX512F-NEXT:    vpermt2q %zmm28, %zmm23, %zmm4
 ; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm5
 ; AVX512F-NEXT:    vmovdqa 576(%rdi), %ymm11
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1,2,3,4,5],ymm5[6,7]
-; AVX512F-NEXT:    vpermi2q %zmm24, %zmm6, %zmm3
+; AVX512F-NEXT:    vpermi2q %zmm24, %zmm2, %zmm3
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm23
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [9,0,7,0,9,0,7,0]
@@ -1862,12 +1868,12 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm8
 ; AVX512F-NEXT:    vpermt2q %zmm26, %zmm13, %zmm8
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm4, %zmm31
-; AVX512F-NEXT:    vpermi2q %zmm0, %zmm30, %zmm3
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm4, %zmm4
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm6, %zmm3
 ; AVX512F-NEXT:    vpermt2q %zmm28, %zmm5, %zmm3
 ; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm5
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm5 = ymm11[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vpermi2q %zmm24, %zmm6, %zmm13
+; AVX512F-NEXT:    vpermi2q %zmm24, %zmm2, %zmm13
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm3, %zmm3
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [2,9,2,9,2,9,2,9]
@@ -1883,11 +1889,12 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm5, %zmm0
 ; AVX512F-NEXT:    vpermt2q %zmm28, %zmm8, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm11, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm12, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm11, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm12, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, 64(%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm29, (%rsi)
@@ -1900,7 +1907,7 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqa64 %zmm23, 64(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm22, (%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, 64(%r10)
-; AVX512F-NEXT:    vmovdqa64 %zmm31, (%r10)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, (%r10)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512F-NEXT:    vzeroupper
@@ -1911,12 +1918,12 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm31
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm6
 ; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm3
 ; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm24
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
 ; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm14
 ; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm15
 ; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm9
@@ -1940,9 +1947,9 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    movb $-32, %r11b
 ; AVX512BW-NEXT:    kmovd %r11d, %k1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm29 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm24, %zmm6, %zmm7
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm2[4,5,4,5],zmm26[4,5,4,5]
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm0, %zmm16
+; AVX512BW-NEXT:    vpermi2q %zmm24, %zmm2, %zmm7
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm3[4,5,4,5],zmm26[4,5,4,5]
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm16
 ; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm18, %zmm16
 ; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k1}
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [11,4,11,4,11,4,11,4]
@@ -1952,21 +1959,20 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [0,5,6,10,0,5,6,10]
 ; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm20, %zmm19
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [13,6,13,6,13,6,13,6]
-; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm21
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm22, %zmm21
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,6,13,6,13,6,13,6]
+; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm21, %zmm22
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <9,0,7,u>
 ; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm17
 ; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm16, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm17 {%k2}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm17 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm0, %zmm18
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm18
 ; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm20, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm19
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm22, %zmm19
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm24, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm3, %zmm21
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm24, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm16 {%k2}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm16 {%k1}
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
 ; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -1975,26 +1981,29 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,5,6,11,0,5,6,11]
 ; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm21, %zmm19
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [14,0,0,7,14,0,0,7]
-; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [14,0,0,7,14,0,0,7]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm23, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm22, %zmm18
 ; AVX512BW-NEXT:    vmovdqa 16(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm18, %zmm18
 ; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm18 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm0, %zmm20
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm20
 ; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm21, %zmm20
-; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm2, %zmm23
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm3, %zmm22
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa 464(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm23, %zmm19
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm22, %zmm19
 ; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm19 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm22, %zmm4
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,5,6,12,0,5,6,12]
-; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm21, %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [13,6,13,6,13,6,13,6]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm21
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm4, %zmm21
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [0,5,6,12,0,5,6,12]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm22, %zmm21
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [7,0,9,0,7,0,9,0]
 ; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm20
@@ -2003,15 +2012,15 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm25 = mem[8,9,10,11,12,13,14,15],ymm25[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm25[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vextracti32x4 $1, %ymm25, %xmm25
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm25, %zmm20, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm0, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm21, %zmm22
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm26, %zmm23
-; AVX512BW-NEXT:    vmovdqa 512(%rdi), %ymm4
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm4 = mem[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti128 $1, %ymm4, %xmm4
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm23, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm21 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm20 {%k1}
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm22, %zmm4
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm26, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %ymm21
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm21 = mem[8,9,10,11,12,13,14,15],ymm21[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm21[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm21, %xmm21
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm21, %zmm23, %zmm21
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k1}
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,7,14,0,0,7,14,0]
 ; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm22
@@ -2027,12 +2036,12 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm11
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm22, %zmm22
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm4
 ; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm23, %zmm4
 ; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm5
 ; AVX512BW-NEXT:    vmovdqa 576(%rdi), %ymm11
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1,2,3,4,5],ymm5[6,7]
-; AVX512BW-NEXT:    vpermi2q %zmm24, %zmm6, %zmm13
+; AVX512BW-NEXT:    vpermi2q %zmm24, %zmm2, %zmm13
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm4, %zmm23
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [9,0,7,0,9,0,7,0]
@@ -2049,11 +2058,11 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm8
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm5, %zmm5
-; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm31, %zmm4
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm6, %zmm4
 ; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm13, %zmm4
 ; AVX512BW-NEXT:    vmovdqa 640(%rdi), %ymm8
 ; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm8 = ymm11[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vpermi2q %zmm24, %zmm6, %zmm3
+; AVX512BW-NEXT:    vpermi2q %zmm24, %zmm2, %zmm3
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6,7]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
@@ -2069,11 +2078,12 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm12, %zmm1
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm4, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
 ; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm8, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm12, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm12, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm29, (%rsi)
@@ -3681,429 +3691,420 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-LABEL: load_i64_stride7_vf32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    subq $2216, %rsp # imm = 0x8A8
-; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovaps 1024(%rdi), %zmm0
-; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm15
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm10
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm3, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm13, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm13, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
-; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm20, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm20, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm7, %zmm25
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512F-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm28, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm16
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [9,0,7,0,9,0,7,0]
-; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm30, %zmm14
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
+; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm22
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm15
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm8
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm24
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm23
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm3
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [10,3,10,3,10,3,10,3]
 ; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm30, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm4, %zmm26
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm24
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm3, %zmm13
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm15
-; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm20, %zmm29
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm3, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm31
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm28, %zmm22
-; AVX512F-NEXT:    vpermi2q %zmm21, %zmm3, %zmm28
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm30, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm21, %zmm30
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm4, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm4, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm4, %zmm7
+; AVX512F-NEXT:    vpermi2q %zmm22, %zmm0, %zmm5
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [11,4,11,4,11,4,11,4]
+; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm4, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm4, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm29
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm4, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm31
+; AVX512F-NEXT:    vpermi2q %zmm22, %zmm0, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [12,5,12,5,12,5,12,5]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm9, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm9, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm16
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm9, %zmm16
+; AVX512F-NEXT:    vpermi2q %zmm22, %zmm0, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [13,6,13,6,13,6,13,6]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm9, %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm9, %zmm19
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm9, %zmm21
+; AVX512F-NEXT:    vpermi2q %zmm22, %zmm0, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,7,14,0,0,7,14,0]
+; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm27, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm26
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm27, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm27, %zmm9
+; AVX512F-NEXT:    vpermi2q %zmm22, %zmm0, %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [9,0,7,0,9,0,7,0]
+; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm30, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm28
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,9,2,9,2,9,2,9]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm0, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm30, %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm20
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm30, %zmm20
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm22, %zmm30
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm22
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm8
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm0
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,5,6,9,0,5,6,9]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm27
-; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
-; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
+; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,5,6,10,0,5,6,10]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm29
+; AVX512F-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm31
+; AVX512F-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,5,6,11,0,5,6,11]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm13
 ; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm4, %zmm29
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
-; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm23
-; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm20
-; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm14
+; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm16
+; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm17
 ; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm25
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,5,6,12,0,5,6,12]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm18
+; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm19
+; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm21
+; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm25
 ; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm4, %zmm22
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
-; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm14
-; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm30
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm26
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,5,6,13,4,5,6,13]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm11
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm27
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,5,6,14,4,5,6,14]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm20
+; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm28
+; AVX512F-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
+; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm30
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,5,8,15,4,5,8,15]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm26
 ; AVX512F-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm23
+; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
+; AVX512F-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512F-NEXT:    movb $24, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm11[4,5,4,5],zmm10[4,5,4,5]
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [14,0,0,7,14,0,0,7]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm10, %zmm26
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [7,0,9,0,7,0,9,0]
-; AVX512F-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm27
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm16, %zmm27
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,11,4,11]
-; AVX512F-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm20, %zmm11
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm21, %zmm17
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm12[4,5,4,5],zmm24[4,5,4,5]
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm10, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm16, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm20, %zmm12
-; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm21, %zmm12
-; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm8, %zmm21
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm3[4,5,4,5],zmm15[4,5,4,5]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512F-NEXT:    vpermi2q %zmm15, %zmm3, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm16, %zmm24
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm15, %zmm16
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm20, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm2[4,5,4,5],zmm9[4,5,4,5]
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm20, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm15, %zmm20
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm2
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm20 {%k1} = zmm2[4,5,4,5],zmm0[4,5,4,5]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [13,6,13,6,13,6,13,6]
+; AVX512F-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm31
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm17, %zmm31
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [14,0,0,7,14,0,0,7]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = [7,0,9,0,7,0,9,0]
+; AVX512F-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm18, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [4,11,4,11]
+; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa 912(%rdi), %xmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm22 {%k1} = zmm4[4,5,4,5],zmm0[4,5,4,5]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm25
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm17, %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm23
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm26
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm18, %zmm26
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm16
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm15, %zmm16
+; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm10
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm10, %zmm15
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm7
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm7[4,5,4,5],zmm0[4,5,4,5]
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm28
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm17, %zmm21
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm7, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm29
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm7, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm18, %zmm24
+; AVX512F-NEXT:    vpermi2q %zmm7, %zmm0, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm7
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm6[4,5,4,5],zmm28[4,5,4,5]
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm14, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa 912(%rdi), %xmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm29, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 464(%rdi), %xmm0
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm26, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm8, %zmm29
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm0
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm26
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm28
 ; AVX512F-NEXT:    vmovdqa 1360(%rdi), %xmm0
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm10, %zmm20
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm0, %zmm1, %zmm23
 ; AVX512F-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa 1024(%rdi), %ymm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11]
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm10, %zmm13
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
+; AVX512F-NEXT:    vmovdqa 1024(%rdi), %ymm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,11]
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm8, %zmm3
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 576(%rdi), %ymm6
+; AVX512F-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm8, %zmm14
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm6
+; AVX512F-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm8, %zmm14
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa 1472(%rdi), %ymm3
 ; AVX512F-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm8, %zmm10
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm10, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm27, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 1088(%rdi), %ymm0
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm0 = ymm2[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vmovdqa64 %ymm0, %ymm23
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vmovdqa64 %ymm0, %ymm27
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,12]
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,13]
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm7, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
-; AVX512F-NEXT:    vpermi2q %zmm8, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqa 960(%rdi), %ymm15
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti32x4 $1, %ymm15, %xmm19
-; AVX512F-NEXT:    vmovdqa 512(%rdi), %ymm15
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm2, %zmm9
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm8, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm7, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm0, %zmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [5,12]
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [6,13]
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm3, %zmm11
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm6, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm12
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm0, %zmm2
+; AVX512F-NEXT:    vpermi2q %zmm10, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqa 960(%rdi), %ymm12
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm12 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti32x4 $1, %ymm12, %xmm19
+; AVX512F-NEXT:    vmovdqa 512(%rdi), %ymm12
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm12 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm10, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm3, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm3, %zmm11
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    movb $-32, %al
 ; AVX512F-NEXT:    kmovw %eax, %k2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm17 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm18 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm22 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm20 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm16 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm15 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm2 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm13 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm13 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm21 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm29 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm28 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k2}
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm19, %zmm24, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm29 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm26 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k2}
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm19, %zmm24, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k2}
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm27, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %ymm23, %ymm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm9, %zmm25, %zmm9
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm15, %zmm19
-; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm15
-; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vmovdqa 1408(%rdi), %ymm7
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm7, %zmm16, %zmm7
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm15
-; AVX512F-NEXT:    vpalignr $8, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512F-NEXT:    vmovdqa 1536(%rdi), %ymm15
-; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm15, %zmm16, %zmm15
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm16, %zmm8
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm16, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm12, 128(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, (%rsi)
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm12, %zmm4, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %ymm27, %ymm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm11
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm11 = mem[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti128 $1, %ymm11, %xmm11
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm11, %zmm26, %zmm11
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm11 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm12, %zmm17
+; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm12
+; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm12 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vmovdqa 1408(%rdi), %ymm5
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm5 = mem[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm5, %zmm18, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm5 {%k2}
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm18
+; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm12
+; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm12 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm8, %zmm12
+; AVX512F-NEXT:    vmovdqa 1536(%rdi), %ymm14
+; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm14 = mem[8,9,10,11,12,13,14,15],ymm14[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm14[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm30, %zmm1
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm14 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm14 = ymm6[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm14, %zmm8, %zmm14
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm8 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm8 = ymm6[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm8, %zmm19, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm10, %zmm7
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm6 = ymm6[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm6, %zmm10, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm15, 192(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, 128(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 64(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 128(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, 192(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm26, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, 64(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 128(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, 192(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm28, (%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm29, 64(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, 128(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 192(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 64(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 128(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm28, 192(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm21, 128(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 192(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 128(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, 192(%r9)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, (%r9)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -4111,444 +4112,439 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 128(%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm19, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, 128(%rax)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 128(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512F-NEXT:    vmovaps %zmm15, 64(%rax)
+; AVX512F-NEXT:    vmovaps %zmm6, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 192(%rax)
+; AVX512F-NEXT:    vmovaps %zmm8, (%rax)
+; AVX512F-NEXT:    vmovaps %zmm14, 64(%rax)
 ; AVX512F-NEXT:    addq $2216, %rsp # imm = 0x8A8
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i64_stride7_vf32:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $2152, %rsp # imm = 0x868
-; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm31
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovaps 576(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm31, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm18
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [12,5,12,5,12,5,12,5]
-; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm19, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm24
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512BW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm25
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm25
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [9,0,7,0,9,0,7,0]
-; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm29, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
+; AVX512BW-NEXT:    subq $2024, %rsp # imm = 0x7E8
+; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm22
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm15
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm21
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm24
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm3
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [10,3,10,3,10,3,10,3]
 ; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm5
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm31, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm19, %zmm31
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm27
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm27
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm20
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm20
-; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm28
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm12
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm21, %zmm29
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
 ; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm6
+; AVX512BW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [11,4,11,4,11,4,11,4]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm31
+; AVX512BW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm13
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [12,5,12,5,12,5,12,5]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm17
+; AVX512BW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm18
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [13,6,13,6,13,6,13,6]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm26
+; AVX512BW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,7,14,0,0,7,14,0]
+; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm27, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm27, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12
+; AVX512BW-NEXT:    vpermi2q %zmm22, %zmm4, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm29
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [9,0,7,0,9,0,7,0]
+; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm30
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm25, %zmm30
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [2,9,2,9,2,9,2,9]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm25, %zmm23
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm25
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm4, %zmm25
+; AVX512BW-NEXT:    vpermi2q %zmm29, %zmm22, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm21
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,5,6,9,0,5,6,9]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm6
 ; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm18
-; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,5,6,10,0,5,6,10]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm31
+; AVX512BW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm13
 ; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,5,6,11,0,5,6,11]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm17
 ; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm26
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,5,6,12,0,5,6,12]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm20
+; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm26
 ; AVX512BW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm27
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm16
-; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm24
-; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm20
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm15
-; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm25
-; AVX512BW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm28
+; AVX512BW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,5,6,13,4,5,6,13]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm27
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,5,6,14,4,5,6,14]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm30
 ; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm29
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm23
+; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,5,8,15,4,5,8,15]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm24
+; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm29
+; AVX512BW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512BW-NEXT:    movb $24, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm15
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm14[4,5,4,5],zmm11[4,5,4,5]
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [14,0,0,7,14,0,0,7]
-; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm13
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [7,0,9,0,7,0,9,0]
-; AVX512BW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm21
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm15, %zmm21
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm0
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm0[4,5,4,5],zmm1[4,5,4,5]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [13,6,13,6,13,6,13,6]
+; AVX512BW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm17, %zmm26
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [4,11,4,11]
-; AVX512BW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm19, %zmm14
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm18
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[4,5,4,5],zmm22[4,5,4,5]
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm26
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [14,0,0,7,14,0,0,7]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm13
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = [7,0,9,0,7,0,9,0]
+; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm24 = [4,11,4,11]
+; AVX512BW-NEXT:    # ymm24 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm24, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm0
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm22 {%k1} = zmm0[4,5,4,5],zmm1[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm23
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm17, %zmm30
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm28
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm24, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm21, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm6[4,5,4,5],zmm3[4,5,4,5]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm6, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm25
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm25
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm3, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm19, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm22[4,5,4,5]
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm16
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm11
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm11, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm8
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[4,5,4,5],zmm1[4,5,4,5]
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm29
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm19
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm17, %zmm19
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm8, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm9, %zmm31
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm8, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm18, %zmm20
+; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm1, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm24, %zmm8
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm6[4,5,4,5],zmm29[4,5,4,5]
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm24, %zmm6
 ; AVX512BW-NEXT:    vmovdqa 912(%rdi), %xmm1
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm31, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm0, %zmm13, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm23
-; AVX512BW-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm0, %zmm11, %zmm22
-; AVX512BW-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa 1024(%rdi), %ymm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,11]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqa 464(%rdi), %xmm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm13, %zmm31
+; AVX512BW-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm28, %zmm29
+; AVX512BW-NEXT:    vmovdqa 1360(%rdi), %xmm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm9, %zmm28
+; AVX512BW-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm1
+; AVX512BW-NEXT:    vmovdqa 1024(%rdi), %ymm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,11]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512BW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm1
+; AVX512BW-NEXT:    vmovdqa 576(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm9, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm1
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm7
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512BW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 1472(%rdi), %ymm1
+; AVX512BW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm11, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm27, %zmm27
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm7
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm13
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,13]
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm4
-; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm5, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm9
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm10
-; AVX512BW-NEXT:    vmovdqa 1088(%rdi), %ymm5
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm5 = ymm19[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm19[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm12, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [6,13]
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm2, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm12
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm11, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm14
+; AVX512BW-NEXT:    vmovdqa 1088(%rdi), %ymm2
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm2 = ymm24[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm24[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm25
 ; AVX512BW-NEXT:    movb $-32, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm24 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm19 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm22 {%k2}
-; AVX512BW-NEXT:    vmovdqa 960(%rdi), %ymm6
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm6, %zmm25, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm6 {%k2}
-; AVX512BW-NEXT:    vmovdqa 512(%rdi), %ymm7
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm7, %zmm26, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
-; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %ymm20
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm20 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm20, %zmm17, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %ymm20
-; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20, %ymm5 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm5 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm3 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm7 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm10 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm28 {%k2}
+; AVX512BW-NEXT:    vmovdqa 960(%rdi), %ymm4
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm4 = mem[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm4, %zmm20, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k2}
+; AVX512BW-NEXT:    vmovdqa 512(%rdi), %ymm5
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm5 = mem[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm5, %zmm2, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k2}
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm12
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm12 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm12, %zmm30, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %ymm17
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm17 = mem[8,9,10,11,12,13,14,15],ymm17[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm17[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm17, %xmm17
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm17, %zmm18, %zmm17
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %ymm18
+; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18, %ymm2 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm18[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm18[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm13, %zmm5
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm18
 ; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm13
 ; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm2, %zmm9
 ; AVX512BW-NEXT:    vmovdqa 1536(%rdi), %ymm13
 ; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm13 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm15, %zmm14
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm10 = ymm10[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm13 = ymm2[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm13, %zmm2, %zmm13
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm2 = ymm2[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm2, %zmm19, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm11[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm11, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm14[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm11, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, 192(%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm16, 128(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, 64(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, (%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, 128(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 128(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, 192(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, 128(%rcx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm17, 192(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 128(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 192(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 128(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, 192(%r9)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, (%r9)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -4557,15 +4553,15 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovaps %zmm0, 128(%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, 128(%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 128(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512BW-NEXT:    addq $2152, %rsp # imm = 0x868
+; AVX512BW-NEXT:    vmovaps %zmm2, (%rax)
+; AVX512BW-NEXT:    vmovaps %zmm13, 64(%rax)
+; AVX512BW-NEXT:    addq $2024, %rsp # imm = 0x7E8
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <224 x i64>, ptr %in.vec, align 64
@@ -8015,45 +8011,44 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-LABEL: load_i64_stride7_vf64:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $6728, %rsp # imm = 0x1A48
-; AVX512F-NEXT:    vmovdqa64 3328(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 3264(%rdi), %zmm3
+; AVX512F-NEXT:    subq $6216, %rsp # imm = 0x1848
+; AVX512F-NEXT:    vmovdqa64 3328(%rdi), %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 3008(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 2944(%rdi), %zmm22
+; AVX512F-NEXT:    vmovdqa64 3264(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 3008(%rdi), %zmm28
+; AVX512F-NEXT:    vmovdqa64 2944(%rdi), %zmm25
 ; AVX512F-NEXT:    vmovdqa64 2880(%rdi), %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 2816(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2752(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20
-; AVX512F-NEXT:    vmovdqa64 2688(%rdi), %zmm15
-; AVX512F-NEXT:    vmovdqa64 2432(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2368(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 2752(%rdi), %zmm20
+; AVX512F-NEXT:    vmovdqa64 2688(%rdi), %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2432(%rdi), %zmm12
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2368(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm11
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm11
 ; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm14
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm14
 ; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm12
-; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm17
+; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm16
+; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
@@ -8061,248 +8056,248 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqa 464(%rdi), %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqa 1360(%rdi), %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqa 912(%rdi), %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqa 2256(%rdi), %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqa 1808(%rdi), %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm17, %zmm3, %zmm0
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm0
 ; AVX512F-NEXT:    vmovdqa 3152(%rdi), %xmm1
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
 ; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 2880(%rdi), %ymm0, %ymm1
-; AVX512F-NEXT:    vmovdqa 2816(%rdi), %ymm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX512F-NEXT:    vmovdqa64 %ymm0, %ymm16
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,11]
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm5, %zmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,7,14,0,0,7,14,0]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm18
-; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqa64 3072(%rdi), %zmm0
+; AVX512F-NEXT:    vinserti128 $1, 2880(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 2816(%rdi), %ymm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,11]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm6, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm19
+; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,7,14,0,0,7,14,0]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm5, %zmm2
+; AVX512F-NEXT:    vmovdqa64 3072(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [4,5,6,13,4,5,6,13]
+; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm27, %zmm2
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [4,5,6,13,4,5,6,13]
-; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm2
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm1
+; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa 576(%rdi), %ymm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm28
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm5, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm6, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm14
 ; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm1
-; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm0
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm5, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm30
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm10
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm27
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm1
-; AVX512F-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
-; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm25
-; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm5, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm14
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm4, %zmm6
-; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm6
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm6, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm1
-; AVX512F-NEXT:    vmovdqa 1024(%rdi), %ymm0
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm5, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm4, %zmm8
-; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm27, %zmm3
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm8
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 2432(%rdi), %ymm0, %ymm1
-; AVX512F-NEXT:    vmovdqa 2368(%rdi), %ymm0
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX512F-NEXT:    vmovdqa64 2304(%rdi), %zmm0
+; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm21
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm6, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm27, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2240(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 2560(%rdi), %zmm21
-; AVX512F-NEXT:    vmovdqa64 2496(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm4, %zmm8
-; AVX512F-NEXT:    vmovdqa64 2624(%rdi), %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm9, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 1984(%rdi), %ymm0, %ymm1
-; AVX512F-NEXT:    vmovdqa 1920(%rdi), %ymm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX512F-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm5, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 2112(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 2048(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm11
-; AVX512F-NEXT:    vmovdqa64 2176(%rdi), %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24
+; AVX512F-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 1472(%rdi), %ymm4
+; AVX512F-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm6, %zmm7
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm18
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm5, %zmm7
+; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm27, %zmm7
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm7, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm11, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti128 $1, 3328(%rdi), %ymm0, %ymm8
-; AVX512F-NEXT:    vmovdqa 3264(%rdi), %ymm12
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3,4,5],ymm8[6,7]
-; AVX512F-NEXT:    vmovdqa64 3200(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqa64 3136(%rdi), %zmm6
-; AVX512F-NEXT:    vpermi2q %zmm11, %zmm6, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 3456(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 3392(%rdi), %zmm31
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm31, %zmm4
-; AVX512F-NEXT:    vmovdqa64 3520(%rdi), %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm9, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512F-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 1024(%rdi), %ymm4
+; AVX512F-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm30
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm6, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm17
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm5, %zmm9
+; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm29
+; AVX512F-NEXT:    vpermt2q %zmm29, %zmm27, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm4, %zmm4
+; AVX512F-NEXT:    vinserti128 $1, 2432(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 2368(%rdi), %ymm4
+; AVX512F-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa64 2304(%rdi), %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa 2880(%rdi), %ymm4
-; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm0
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm4 = ymm0[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [5,12]
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm15 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm15[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [9,0,7,0,9,0,7,0]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm16
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm4, %zmm16
+; AVX512F-NEXT:    vmovdqa64 2240(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm6, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 2560(%rdi), %zmm11
+; AVX512F-NEXT:    vmovdqa64 2496(%rdi), %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm5, %zmm10
+; AVX512F-NEXT:    vmovdqa64 2624(%rdi), %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm27, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm23
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm10, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti128 $1, 1984(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 1920(%rdi), %ymm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-NEXT:    vmovdqa64 %ymm4, %ymm31
+; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm20
+; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm6, %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 2112(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 2048(%rdi), %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm15
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm15
+; AVX512F-NEXT:    vmovdqa64 2176(%rdi), %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm27, %zmm15
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm26
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti128 $1, 3328(%rdi), %ymm0, %ymm15
+; AVX512F-NEXT:    vmovdqa 3264(%rdi), %ymm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm15 = ymm10[0,1,2,3,4,5],ymm15[6,7]
+; AVX512F-NEXT:    vmovdqa64 3200(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqa64 3136(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm9, %zmm4, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 3456(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 3392(%rdi), %zmm4
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqa64 3520(%rdi), %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm27, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm16
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm15, %zmm5, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa 2880(%rdi), %ymm5
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm5 = ymm1[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm1, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [9,0,7,0,9,0,7,0]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm5, %zmm19
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,5,6,14,4,5,6,14]
 ; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm16 # 64-byte Folded Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm16, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm9
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm2[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm0, %zmm9
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm9
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm4, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm8, %zmm9
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm9, %zmm2
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm19 # 64-byte Folded Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm15, %zmm19, %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm15
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm2[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm15 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm15
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm5, %zmm15
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm15 # 64-byte Folded Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm2
-; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm0, %zmm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm7
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm4, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm8, %zmm7
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm7, %zmm2
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm3[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm1, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm5, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm8, %zmm3
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 1536(%rdi), %ymm2
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm3[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm5, %zmm3
 ; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm3 # 64-byte Folded Reload
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -8310,430 +8305,421 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
 ; AVX512F-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm1, %zmm3
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm4, %zmm3
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm3 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm5, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm29, %zmm8, %zmm3
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 2432(%rdi), %ymm2
 ; AVX512F-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
 ; AVX512F-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm4, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm8, %zmm3
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm7 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm5, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm8, %zmm3
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 1984(%rdi), %ymm2
-; AVX512F-NEXT:    vmovdqa64 %ymm29, %ymm3
+; AVX512F-NEXT:    vmovdqa64 %ymm31, %ymm3
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm3[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm1, %zmm3
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm4, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm8, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm29
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm8, %zmm3
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 3328(%rdi), %ymm2
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm12[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vpermi2q %zmm11, %zmm6, %zmm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vpermi2q %zmm31, %zmm5, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm8, %zmm4
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [10,3,10,3,10,3,10,3]
-; AVX512F-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm26, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [11,4,11,4,11,4,11,4]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [12,5,12,5,12,5,12,5]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [13,6,13,6,13,6,13,6]
-; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm12, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,9,2,9,2,9,2,9]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm26, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm7, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm12, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm0, %zmm17
-; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm26, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm7, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm12, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm25
-; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm26, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm3, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm7, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm12, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm0, %zmm30
-; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm26, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm3, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm7, %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm12, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm12, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm21
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm27
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm4
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm31, %zmm26
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm3, %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm16
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm10[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermi2q %zmm9, %zmm3, %zmm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm6, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm8, %zmm5
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm12, %zmm1
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [10,3,10,3,10,3,10,3]
+; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm25, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [11,4,11,4,11,4,11,4]
+; AVX512F-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm28, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [12,5,12,5,12,5,12,5]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm5, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [13,6,13,6,13,6,13,6]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm2, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [2,9,2,9,2,9,2,9]
+; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm15, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm25, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm8
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm31, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm5, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm15, %zmm21
+; AVX512F-NEXT:    vmovdqu64 %zmm21, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm25, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm31
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm28, %zmm31
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm5, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm2, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm28
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm7, %zmm28
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm31, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm12, %zmm3
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm31, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm15, %zmm24
+; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm25, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm23
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm28, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm26
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm5, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm27
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm2, %zmm27
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm15, %zmm30
+; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm25, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm28, %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm5, %zmm19
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm2, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm15, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm20
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm25, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm28, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm16
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm29
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm7
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm25, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm30
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm4, %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm28, %zmm4
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm30, %zmm28
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm5, %zmm8
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm30, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm2, %zmm10
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm30, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm15, %zmm30
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm15, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,6,9,0,5,6,9]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm13
 ; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm0, %zmm15
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm26
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm12
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm25
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,6,10,0,5,6,10]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm30
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm20
-; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm0, %zmm20
-; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm20
-; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm24
-; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm0, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm31
+; AVX512F-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm23
+; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm18
 ; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm11
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm14
+; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm28
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,6,11,0,5,6,11]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm28
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm0, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm25
-; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm0, %zmm22
-; AVX512F-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm17
-; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm26
+; AVX512F-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm19
+; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm16
+; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm11
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,6,12,0,5,6,12]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm0, %zmm23
-; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm27
+; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm21
 ; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm17
+; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm24
+; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,5,8,15,4,5,8,15]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm0, %zmm19
-; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm27
-; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm20
+; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm29
+; AVX512F-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm30
+; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm22
+; AVX512F-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    movb $24, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm23 = <0,7,14,u>
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm23, %zmm21
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm17
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm13[4,5,4,5],zmm29[4,5,4,5]
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [7,0,9,0,7,0,9,0]
-; AVX512F-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm29, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,11,4,11]
-; AVX512F-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm6, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm23, %zmm24
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,7,14,u>
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm16, %zmm27
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm1[4,5,4,5],zmm25[4,5,4,5]
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm29, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm27 {%k1} = zmm1[4,5,4,5],zmm22[4,5,4,5]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [13,6,13,6,13,6,13,6]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [7,0,9,0,7,0,9,0]
+; AVX512F-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm6, %zmm1
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [4,11,4,11]
+; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm23, %zmm31
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm23
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm16, %zmm23
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm31 {%k1} = zmm1[4,5,4,5],zmm17[4,5,4,5]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm29, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm6, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm31 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm31 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm30
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm30
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm23, %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm21
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm16, %zmm21
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm25 {%k1} = zmm1[4,5,4,5],zmm5[4,5,4,5]
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm29, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm29
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm29
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm23, %zmm19
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm19 # 64-byte Folded Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm29, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm6, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm17
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm23, %zmm17
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm16, %zmm17
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm29, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm6, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm20
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm11
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm23, %zmm11
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm13, %zmm10, %zmm23
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k1} = zmm2[4,5,4,5],zmm4[4,5,4,5]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm13
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm16, %zmm13
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm29, %zmm22
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm4, %zmm29
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm6, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k1} = zmm1[4,5,4,5],zmm5[4,5,4,5]
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm15, %zmm1, %zmm16
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm3[4,5,4,5],zmm4[4,5,4,5]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm2, %zmm14
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm26, %zmm24
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm4, %zmm26
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k1} = zmm1[4,5,4,5],zmm6[4,5,4,5]
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = <9,0,7,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm12
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,13]
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm20
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm3, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm1, %zmm15
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm3, %zmm4
-; AVX512F-NEXT:    vpermi2q %zmm10, %zmm13, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm1, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm3, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm3, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm3, %zmm6
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vpermi2q %zmm11, %zmm15, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm1, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512F-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512F-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -8747,15 +8733,15 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovups %zmm0, (%rsp) # 64-byte Spill
 ; AVX512F-NEXT:    movb $-32, %al
 ; AVX512F-NEXT:    kmovw %eax, %k2
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -8763,175 +8749,169 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm23 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm16 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm6 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm9 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm8 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm7 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm4 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm3 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm0 {%k2}
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa 2752(%rdi), %ymm15
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm22, %zmm28
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k2}
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa 512(%rdi), %ymm15
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa 2752(%rdi), %ymm2
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm24, %zmm25
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm25 {%k2}
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa 512(%rdi), %ymm2
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm18
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm2 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm15
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm22
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm15, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovups %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm1, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k2}
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa 1408(%rdi), %ymm15
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm0, %zmm26
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa 960(%rdi), %ymm14
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm14 = mem[8,9,10,11,12,13,14,15],ymm14[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm14[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm14, %zmm0, %zmm14
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa 2304(%rdi), %ymm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm1, %zmm20
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm20 {%k2}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm15 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm15 = ymm11[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa 960(%rdi), %ymm1
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm15 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm15 = ymm13[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa 1856(%rdi), %ymm13
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512F-NEXT:    vextracti128 $1, %ymm13, %xmm13
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm13, %zmm0, %zmm13
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm15, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm1, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm15, %zmm2, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa 2304(%rdi), %ymm2
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm2, %zmm11, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm2 {%k2}
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm15 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm15 = ymm11[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa 1856(%rdi), %ymm11
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm11 = mem[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
+; AVX512F-NEXT:    vextracti128 $1, %ymm11, %xmm11
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm11, %zmm24, %zmm11
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm11 {%k2}
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm15, %zmm24, %zmm24
 ; AVX512F-NEXT:    vmovdqa 3200(%rdi), %ymm15
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
 ; AVX512F-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm29, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512F-NEXT:    vmovdqa64 %zmm23, 448(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 384(%rsi)
+; AVX512F-NEXT:    vinserti32x4 $0, %xmm15, %zmm26, %zmm15
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm15 {%k2}
+; AVX512F-NEXT:    vmovdqa64 %zmm16, 448(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, 384(%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm19, 320(%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm17, 256(%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm31, 192(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm25, 128(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm21, 64(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm24, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm21, 128(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm27, 64(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, 448(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, 256(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 320(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 128(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 256(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 320(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, 128(%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, 192(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm16, 64(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 384(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 448(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 256(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, 320(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 128(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, 192(%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm27, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm30, 64(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 384(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, 64(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 384(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm3, 448(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm3, 256(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, 320(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm14, 128(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm30, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm29, 64(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm28, 384(%rcx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm15, 448(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, 256(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 320(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm14, 128(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm26, 192(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 64(%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm28, 384(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, 256(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 320(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 128(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 192(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm25, 384(%r8)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 448(%r9)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -8966,15 +8946,14 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 384(%rax)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512F-NEXT:    vmovaps %zmm24, 384(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 256(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 128(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 192(%rax)
@@ -8982,51 +8961,51 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovaps %zmm0, (%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512F-NEXT:    addq $6728, %rsp # imm = 0x1A48
+; AVX512F-NEXT:    addq $6216, %rsp # imm = 0x1848
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i64_stride7_vf64:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $6664, %rsp # imm = 0x1A08
-; AVX512BW-NEXT:    vmovdqa64 3328(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 3264(%rdi), %zmm4
+; AVX512BW-NEXT:    subq $6280, %rsp # imm = 0x1888
+; AVX512BW-NEXT:    vmovdqa64 3328(%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 3264(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 3008(%rdi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 2944(%rdi), %zmm18
-; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2944(%rdi), %zmm20
 ; AVX512BW-NEXT:    vmovdqa64 2880(%rdi), %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 2816(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2752(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 2688(%rdi), %zmm15
-; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2432(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2368(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 2752(%rdi), %zmm18
+; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2688(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2432(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2368(%rdi), %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm15
+; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm14
+; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm10
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm17
+; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm16
 ; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm14
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
@@ -9034,867 +9013,863 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqa 464(%rdi), %xmm2
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqa 16(%rdi), %xmm2
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm3, %zmm1
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqa 1360(%rdi), %xmm2
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqa 912(%rdi), %xmm2
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqa 2256(%rdi), %xmm2
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqa 1808(%rdi), %xmm2
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm17, %zmm4, %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm0
 ; AVX512BW-NEXT:    vmovdqa 3152(%rdi), %xmm1
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
 ; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 2880(%rdi), %ymm0, %ymm1
-; AVX512BW-NEXT:    vmovdqa 2816(%rdi), %ymm0
-; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-NEXT:    vinserti128 $1, 2880(%rdi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa 2816(%rdi), %ymm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,11]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm5, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,7,14,0,0,7,14,0]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm5, %zmm15
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,7,14,0,0,7,14,0]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 3072(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm6, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 3072(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,5,6,13,4,5,6,13]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm2
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm2
-; AVX512BW-NEXT:    vmovdqa 576(%rdi), %ymm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm2[6,7]
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm2
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa 576(%rdi), %ymm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm5, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm25
 ; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm8, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm6, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm3
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm2
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm0
-; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm5, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm30
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm8, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm2
-; AVX512BW-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm29
-; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm5, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm8, %zmm4
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa 1472(%rdi), %ymm4
+; AVX512BW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm31
+; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm5, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm6, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm29
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm8, %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm28
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm8, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm9, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm9
-; AVX512BW-NEXT:    vmovdqa 1024(%rdi), %ymm0
-; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm9[6,7]
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm27
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm5, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm4, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm8, %zmm10
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm7, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa 1024(%rdi), %ymm4
+; AVX512BW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm24
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm5, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm17
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 2432(%rdi), %ymm0, %ymm9
-; AVX512BW-NEXT:    vmovdqa 2368(%rdi), %ymm0
-; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm9[6,7]
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm6, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm8, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti128 $1, 2432(%rdi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa 2368(%rdi), %ymm4
+; AVX512BW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512BW-NEXT:    vmovdqa64 2304(%rdi), %zmm21
 ; AVX512BW-NEXT:    vmovdqa64 2240(%rdi), %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm5, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 2560(%rdi), %zmm31
-; AVX512BW-NEXT:    vmovdqa64 2496(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm4, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 2624(%rdi), %zmm19
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm8, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 1984(%rdi), %ymm0, %ymm9
-; AVX512BW-NEXT:    vmovdqa 1920(%rdi), %ymm0
-; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm9[6,7]
-; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm18
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 2560(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 2496(%rdi), %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 2624(%rdi), %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm10, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti128 $1, 1984(%rdi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa 1920(%rdi), %ymm4
+; AVX512BW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm27
 ; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm5, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 2112(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 2048(%rdi), %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm4, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 2176(%rdi), %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm12, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti128 $1, 3328(%rdi), %ymm0, %ymm9
-; AVX512BW-NEXT:    vmovdqa 3264(%rdi), %ymm12
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm12[0,1,2,3,4,5],ymm9[6,7]
-; AVX512BW-NEXT:    vmovdqa64 3200(%rdi), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 3136(%rdi), %zmm10
-; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm10, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 3456(%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 3392(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 3520(%rdi), %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm4, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm5, %zmm11
+; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 2112(%rdi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 2048(%rdi), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm14
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 2176(%rdi), %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm14
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 2880(%rdi), %ymm4
-; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm4 = mem[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [5,12]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [9,0,7,0,9,0,7,0]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm14, %zmm11
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti128 $1, 3328(%rdi), %ymm0, %ymm14
+; AVX512BW-NEXT:    vmovdqa 3264(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm0[0,1,2,3,4,5],ymm14[6,7]
+; AVX512BW-NEXT:    vmovdqa64 3200(%rdi), %zmm10
+; AVX512BW-NEXT:    vmovdqa64 3136(%rdi), %zmm15
+; AVX512BW-NEXT:    vpermi2q %zmm10, %zmm15, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm15 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [4,5,6,14,4,5,6,14]
-; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm15 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 640(%rdi), %ymm8
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm1[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 3456(%rdi), %zmm28
+; AVX512BW-NEXT:    vmovdqa64 3392(%rdi), %zmm5
+; AVX512BW-NEXT:    vpermi2q %zmm28, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 3520(%rdi), %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm8, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm30
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm15, %zmm6, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 2880(%rdi), %ymm6
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm6 = ymm1[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm1, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [9,0,7,0,9,0,7,0]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm19
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,5,6,14,4,5,6,14]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm19 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm15, %zmm19, %zmm15
+; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 640(%rdi), %ymm15
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm2 = ymm2[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm4, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm5, %zmm8
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm1
-; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm0, %zmm7
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm4, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm5, %zmm7
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm7, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 1536(%rdi), %ymm1
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm3[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm4, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm5, %zmm3
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 1088(%rdi), %ymm1
-; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm15 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm15
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm15 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm2
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm2 = ymm3[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm5, %zmm3
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 2432(%rdi), %ymm1
-; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm6, %zmm3
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm3 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 1536(%rdi), %ymm2
+; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm4, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm5, %zmm3
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 1984(%rdi), %ymm1
-; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm6, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm8, %zmm3
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 1088(%rdi), %ymm2
+; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm4, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm5, %zmm3
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm6, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm8, %zmm3
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 2432(%rdi), %ymm2
+; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm8, %zmm3
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 1984(%rdi), %ymm2
+; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm6, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm31
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm3
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 3328(%rdi), %ymm2
+; AVX512BW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vpermi2q %zmm10, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm28, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm8, %zmm6
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm6, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa 3328(%rdi), %ymm1
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm12[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm10, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm5, %zmm4
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [10,3,10,3,10,3,10,3]
 ; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm21
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm19, %zmm21
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [11,4,11,4,11,4,11,4]
-; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [12,5,12,5,12,5,12,5]
-; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [11,4,11,4,11,4,11,4]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm22, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [12,5,12,5,12,5,12,5]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [13,6,13,6,13,6,13,6]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [2,9,2,9,2,9,2,9]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm19, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm22, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm21
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm16
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm19, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm22, %zmm30
 ; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm18, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm8, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [13,6,13,6,13,6,13,6]
-; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm11, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,9,2,9,2,9,2,9]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm19, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm20, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 %zmm29, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm19, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm20, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm24
-; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm20, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm27
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm19, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm22, %zmm25
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm27
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm8, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm29
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm19, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm20, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm18, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm16
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm11, %zmm16
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm28
-; AVX512BW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm19, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm20, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm11, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm30
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm1 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm1 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm1 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm1 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm20
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm1 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm29
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm29
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm11, %zmm1
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm19, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm22, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm8, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm23
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm3, %zmm23
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm19, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm14
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm22, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm15
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm8, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm3, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm31
+; AVX512BW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm19, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm28, %zmm5, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm22, %zmm5
+; AVX512BW-NEXT:    vpermi2q %zmm28, %zmm0, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm8, %zmm7
+; AVX512BW-NEXT:    vpermi2q %zmm28, %zmm0, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm21
+; AVX512BW-NEXT:    vpermi2q %zmm28, %zmm0, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm28
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm12
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,6,9,0,5,6,9]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm26
-; AVX512BW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm11
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm19
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,6,10,0,5,6,10]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm14
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm20
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm30
+; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm25
+; AVX512BW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm22
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,6,11,0,5,6,11]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm15
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm27
+; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm20
+; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm15
 ; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm18
-; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,6,12,0,5,6,12]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm29
+; AVX512BW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm23
+; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm17
 ; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm27
-; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm16
-; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm13
 ; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm11
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,5,8,15,4,5,8,15]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm30
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm22
-; AVX512BW-NEXT:    movb $24, %al
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,7,14,u>
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm16, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[4,5,4,5],zmm31[4,5,4,5]
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [7,0,9,0,7,0,9,0]
-; AVX512BW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm26, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [4,11,4,11]
-; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm16, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm24
+; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm1[4,5,4,5],zmm25[4,5,4,5]
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm26, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm16, %zmm27
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm28
+; AVX512BW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    movb $24, %al
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm14 = <0,7,14,u>
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm14, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm27 {%k1} = zmm1[4,5,4,5],zmm23[4,5,4,5]
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm26, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm1[4,5,4,5],zmm0[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm1
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [13,6,13,6,13,6,13,6]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [7,0,9,0,7,0,9,0]
+; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [4,11,4,11]
+; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm16, %zmm25
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm14, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm31
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm31
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm25 {%k1} = zmm1[4,5,4,5],zmm9[4,5,4,5]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm26, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm29
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm30
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm16, %zmm23
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm28 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm28
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm26, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm28 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm27
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm16, %zmm13
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm25
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm26, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm25 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm26
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm14, %zmm24
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm1[4,5,4,5],zmm3[4,5,4,5]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm28, %zmm15, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-NEXT:    vpermi2q %zmm16, %zmm13, %zmm14
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm2[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm26, %zmm21
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k1} = zmm5[4,5,4,5],zmm4[4,5,4,5]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[4,5,4,5],zmm4[4,5,4,5]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm18
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm21
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm23, %zmm21
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm23
 ; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm12
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm3[4,5,4,5],zmm6[4,5,4,5]
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = <9,0,7,u>
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm11
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,13]
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm1, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm4
-; AVX512BW-NEXT:    vpermi2q %zmm15, %zmm28, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm6
+; AVX512BW-NEXT:    vpermi2q %zmm13, %zmm16, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm1, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm15
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovups (%rsp), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovups (%rsp), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm30, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vmovups %zmm1, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm1 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm1 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm1 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm24, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm22, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    movb $-32, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm27 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm12 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm12 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm9 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm9 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm31 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm14 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm6 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm10 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm3 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm30 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm15 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm8 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa 2752(%rdi), %ymm1
 ; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm21, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k2}
-; AVX512BW-NEXT:    vmovdqa 512(%rdi), %ymm1
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm18
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm1, %zmm21, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqa 512(%rdi), %ymm2
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm2 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm2, %zmm13, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm2 {%k2}
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm13
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm13, %xmm13
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm13, %zmm15, %zmm13
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm13 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %ymm18
 ; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm18 = mem[8,9,10,11,12,13,14,15],ymm18[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm18[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vextracti32x4 $1, %ymm18, %xmm18
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm18, %zmm19, %zmm18
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm18 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %ymm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm18, %zmm15, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm18 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %ymm19
 ; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm19 = mem[8,9,10,11,12,13,14,15],ymm19[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm19[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vextracti32x4 $1, %ymm19, %xmm19
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm19, %zmm20, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm19 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %ymm20
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm19, %zmm15, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm19 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 2304(%rdi), %ymm20
 ; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm20 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm20, %zmm21, %zmm20
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm20 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 2304(%rdi), %ymm22
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm20, %zmm15, %zmm20
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm20 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %ymm21
+; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm21 = mem[8,9,10,11,12,13,14,15],ymm21[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm21[16,17,18,19,20,21,22,23]
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm21, %xmm21
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm21, %zmm15, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm21 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 3200(%rdi), %ymm22
 ; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm22 = mem[8,9,10,11,12,13,14,15],ymm22[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm22[16,17,18,19,20,21,22,23]
 ; AVX512BW-NEXT:    vextracti32x4 $1, %ymm22, %xmm22
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm22, %zmm21, %zmm22
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm22 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %ymm24
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm24 = mem[8,9,10,11,12,13,14,15],ymm24[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm24[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm24, %xmm24
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm24, %zmm21, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm21 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 3200(%rdi), %ymm24
-; AVX512BW-NEXT:    vpalignr {{.*#+}} ymm24 = mem[8,9,10,11,12,13,14,15],ymm24[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm24[16,17,18,19,20,21,22,23]
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm24, %xmm24
-; AVX512BW-NEXT:    vinserti32x4 $0, %xmm24, %zmm26, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm24 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, 448(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, 384(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, 320(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 256(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, 192(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, 128(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm10, 64(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm10, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, 448(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 256(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 320(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 128(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 192(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, 64(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 384(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 448(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, 256(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, 320(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, 128(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, 192(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, 64(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, 384(%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, 448(%r8)
+; AVX512BW-NEXT:    vinserti32x4 $0, %xmm22, %zmm23, %zmm22
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm22 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, 448(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, 384(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, 320(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, 256(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, 192(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, 128(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, 64(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm12, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 448(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 256(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 320(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, 128(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, 192(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, 64(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 384(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm3, 448(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, 256(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, 320(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, 128(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, 192(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, 384(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, 448(%r8)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, 256(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 320(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, 128(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, 192(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 384(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 320(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, 128(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 192(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 384(%r8)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 448(%r9)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9931,9 +9906,9 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 256(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 320(%rax)
@@ -9945,7 +9920,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovaps %zmm0, (%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512BW-NEXT:    addq $6664, %rsp # imm = 0x1A08
+; AVX512BW-NEXT:    addq $6280, %rsp # imm = 0x1888
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <448 x i64>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll
index 4738b2344255b..30dbf8910983b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll
@@ -815,229 +815,245 @@ define void @load_i64_stride8_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512F-LABEL: load_i64_stride8_vf8:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm12
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm11
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [0,8,0,8,0,8,0,8]
-; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm5, %zmm6
-; AVX512F-NEXT:    vpermi2q %zmm12, %zmm3, %zmm5
-; AVX512F-NEXT:    movb $-64, %al
-; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    pushq %rbx
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm7
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,0,8,0,8,0,8]
 ; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm8, %zmm10
-; AVX512F-NEXT:    vpermi2q %zmm12, %zmm3, %zmm8
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm7, %zmm8
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,0,8,0,8,0,8]
 ; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm9, %zmm13
-; AVX512F-NEXT:    vpermi2q %zmm12, %zmm3, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm9 {%k1}
-; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm13
-; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm14
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm14[0],ymm13[0],ymm14[2],ymm13[2]
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm16
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm17
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm17[0],ymm16[0],ymm17[2],ymm16[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm15[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm9
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm15
-; AVX512F-NEXT:    vpermi2q %zmm12, %zmm3, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm0 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm14[1],ymm13[1],ymm14[3],ymm13[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm14 = ymm17[1],ymm16[1],ymm17[3],ymm16[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm14[2,3],ymm13[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm13
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [4,12,4,12,4,12,4,12]
-; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm3[0],zmm12[0],zmm3[2],zmm12[2],zmm3[4],zmm12[4],zmm3[6],zmm12[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm16 = zmm3[1],zmm12[1],zmm3[3],zmm12[3],zmm3[5],zmm12[5],zmm3[7],zmm12[7]
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [6,14,6,14,6,14,6,14]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm15, %zmm17
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [7,15,7,15,7,15,7,15]
-; AVX512F-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm19, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k1} = zmm11[0],zmm7[0],zmm11[2],zmm7[2],zmm11[4],zmm7[4],zmm11[6],zmm7[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm11[1],zmm7[1],zmm11[3],zmm7[3],zmm11[5],zmm7[5],zmm11[7],zmm7[7]
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm14, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm2, %zmm14
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [4,12,4,12]
-; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm1, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm0
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm6, %zmm9
+; AVX512F-NEXT:    movb $-64, %bl
+; AVX512F-NEXT:    kmovw %ebx, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm8
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm10
+; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm10, %ymm10
+; AVX512F-NEXT:    vinserti128 $1, 128(%rdi), %ymm8, %ymm11
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm9, %zmm16
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm7, %zmm9
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm6, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm12 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm11[1],ymm10[1],ymm11[3],ymm10[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm12, %zmm9
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm7, %zmm10
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,10,2,10,2,10,2,10]
 ; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm11, %zmm12
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm2, %zmm11
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512F-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm1, %zmm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm6
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm11
-; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm11, %ymm11
-; AVX512F-NEXT:    vinserti128 $1, 128(%rdi), %ymm6, %ymm6
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm14 = ymm6[0],ymm11[0],ymm6[2],ymm11[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm5, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm6[1],ymm11[1],ymm6[3],ymm11[3]
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm8, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm16 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm16, %zmm7
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm2, %zmm15
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [6,14,6,14]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm6, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm12
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm13
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm13[0],ymm12[0],ymm13[2],ymm12[2]
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm14
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm15
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm8[2,3],ymm10[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm11, %zmm10
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm7, %zmm8
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm6, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm11 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm13[1],ymm12[1],ymm13[3],ymm12[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm12[2,3],ymm8[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm11, %zmm11
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [4,12,4,12,4,12,4,12]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm7, %zmm8
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm6[0],zmm4[0],zmm6[2],zmm4[2],zmm6[4],zmm4[4],zmm6[6],zmm4[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm12 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [4,12,4,12]
 ; AVX512F-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm1, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm17, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm19, %zmm2
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,12]
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm8
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm7, %zmm12
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm13 = zmm6[1],zmm4[1],zmm6[3],zmm4[3],zmm6[5],zmm4[5],zmm6[7],zmm4[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm12
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm14 = [5,13]
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm14
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm13, %zmm12
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
+; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm6, %zmm13
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm13 {%k1} = zmm7[0],zmm5[0],zmm7[2],zmm5[2],zmm7[4],zmm5[4],zmm7[6],zmm5[6]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,6,14]
+; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm14
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm15 = [6,14]
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm15
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm13, %zmm13
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm6, %zmm14
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm14 {%k1} = zmm7[1],zmm5[1],zmm7[3],zmm5[3],zmm7[5],zmm5[5],zmm7[7],zmm5[7]
 ; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [7,15,7,15]
 ; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm1, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm5, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, (%r10)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rdi)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,15]
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm16, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, (%r11)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, (%r10)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512F-NEXT:    popq %rbx
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i64_stride8_vf8:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm12
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm11
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [0,8,0,8,0,8,0,8]
-; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm5, %zmm6
-; AVX512BW-NEXT:    vpermi2q %zmm12, %zmm3, %zmm5
-; AVX512BW-NEXT:    movb $-64, %al
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    pushq %rbx
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm7
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,0,8,0,8,0,8]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm8, %zmm10
-; AVX512BW-NEXT:    vpermi2q %zmm12, %zmm3, %zmm8
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm7, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,0,8,0,8,0,8]
 ; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm9, %zmm13
-; AVX512BW-NEXT:    vpermi2q %zmm12, %zmm3, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm9 {%k1}
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm13
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm14
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm14[0],ymm13[0],ymm14[2],ymm13[2]
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm16
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm17
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm17[0],ymm16[0],ymm17[2],ymm16[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm15[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm9
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm15
-; AVX512BW-NEXT:    vpermi2q %zmm12, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm14[1],ymm13[1],ymm14[3],ymm13[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm14 = ymm17[1],ymm16[1],ymm17[3],ymm16[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm14[2,3],ymm13[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm13
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [4,12,4,12,4,12,4,12]
-; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm3[0],zmm12[0],zmm3[2],zmm12[2],zmm3[4],zmm12[4],zmm3[6],zmm12[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm16 = zmm3[1],zmm12[1],zmm3[3],zmm12[3],zmm3[5],zmm12[5],zmm3[7],zmm12[7]
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [6,14,6,14,6,14,6,14]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm17
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [7,15,7,15,7,15,7,15]
-; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm19, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k1} = zmm11[0],zmm7[0],zmm11[2],zmm7[2],zmm11[4],zmm7[4],zmm11[6],zmm7[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm11[1],zmm7[1],zmm11[3],zmm7[3],zmm11[5],zmm7[5],zmm11[7],zmm7[7]
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm14
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [4,12,4,12]
-; AVX512BW-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm1, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm0
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [5,13,5,13,5,13,5,13]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm6, %zmm9
+; AVX512BW-NEXT:    movb $-64, %bl
+; AVX512BW-NEXT:    kmovd %ebx, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm8
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm10
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm10, %ymm10
+; AVX512BW-NEXT:    vinserti128 $1, 128(%rdi), %ymm8, %ymm11
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm9, %zmm16
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm7, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm6, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm12 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm11[1],ymm10[1],ymm11[3],ymm10[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm12, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm7, %zmm10
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [2,10,2,10,2,10,2,10]
 ; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm11, %zmm12
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm11
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512BW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm1, %zmm7
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm6
-; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm11
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm11, %ymm11
-; AVX512BW-NEXT:    vinserti128 $1, 128(%rdi), %ymm6, %ymm6
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm14 = ymm6[0],ymm11[0],ymm6[2],ymm11[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm5, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm6[1],ymm11[1],ymm6[3],ymm11[3]
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm8, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm16 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm16, %zmm7
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm15
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [6,14,6,14]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm6, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k1}
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm12
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm13
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm13[0],ymm12[0],ymm13[2],ymm12[2]
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm14
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm15
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm8[2,3],ymm10[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm11, %zmm10
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm7, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm6, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm11 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm13[1],ymm12[1],ymm13[3],ymm12[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm12[2,3],ymm8[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm11, %zmm11
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [4,12,4,12,4,12,4,12]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm7, %zmm8
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm6[0],zmm4[0],zmm6[2],zmm4[2],zmm6[4],zmm4[4],zmm6[6],zmm4[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm12 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [4,12,4,12]
 ; AVX512BW-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm1, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm17, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm19, %zmm2
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,12]
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [5,13,5,13,5,13,5,13]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm7, %zmm12
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm13 = zmm6[1],zmm4[1],zmm6[3],zmm4[3],zmm6[5],zmm4[5],zmm6[7],zmm4[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm12
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = [5,13]
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm14
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm13, %zmm12
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
+; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm6, %zmm13
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm13 {%k1} = zmm7[0],zmm5[0],zmm7[2],zmm5[2],zmm7[4],zmm5[4],zmm7[6],zmm5[6]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,6,14]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm14
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm15 = [6,14]
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm15
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm13, %zmm13
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm6, %zmm14
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm14 {%k1} = zmm7[1],zmm5[1],zmm7[3],zmm5[3],zmm7[5],zmm5[5],zmm7[7],zmm5[7]
 ; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [7,15,7,15]
 ; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm1, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%r10)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rdi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,15]
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%r11)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%r10)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512BW-NEXT:    popq %rbx
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <64 x i64>, ptr %in.vec, align 64
@@ -1925,445 +1941,441 @@ define void @load_i64_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-LABEL: load_i64_stride8_vf16:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $264, %rsp # imm = 0x108
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm18
-; AVX512F-NEXT:    vmovaps 640(%rdi), %zmm0
-; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm31
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512F-NEXT:    pushq %rax
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm31
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm10
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm30
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm8
 ; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm28
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm10
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm14
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm24
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm12
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm13
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [0,8,0,8,0,8,0,8]
-; AVX512F-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm19, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm19, %zmm15
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm21
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm26
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm12
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm15
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm16
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm17, %zmm16
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm14, %zmm18
 ; AVX512F-NEXT:    movb $-64, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %xmm16
-; AVX512F-NEXT:    vinserti32x4 $1, 192(%rdi), %ymm16, %ymm21
-; AVX512F-NEXT:    vinserti32x4 $1, 128(%rdi), %ymm0, %ymm16
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm16[0],ymm21[0],ymm16[2],ymm21[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm15, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm18 {%k1}
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %xmm16
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %xmm19
+; AVX512F-NEXT:    vinserti32x4 $1, 192(%rdi), %ymm19, %ymm0
+; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vinserti32x4 $1, 128(%rdi), %ymm16, %ymm4
+; AVX512F-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm19 = ymm4[0],ymm0[0],ymm4[2],ymm0[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm19, %zmm18, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm19, %zmm0
-; AVX512F-NEXT:    vpermi2q %zmm11, %zmm28, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k1}
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,2,10,2,10,2,10]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm5
+; AVX512F-NEXT:    vpermi2q %zmm26, %zmm7, %zmm17
+; AVX512F-NEXT:    vpermi2q %zmm11, %zmm21, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm14 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm18, %zmm2
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm19, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k1}
+; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm2
 ; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm20
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm20[0],ymm5[0],ymm20[2],ymm5[2]
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm22
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm23
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm23[0],ymm22[0],ymm23[2],ymm22[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm15, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm1
-; AVX512F-NEXT:    vpermi2q %zmm11, %zmm28, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqa 704(%rdi), %ymm1
-; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm2
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %ymm25
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %ymm27
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm27[0],ymm25[0],ymm27[2],ymm25[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm8[2,3],ymm15[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm20[0],ymm2[0],ymm20[2],ymm2[2]
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm24
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm25
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm25[0],ymm24[0],ymm25[2],ymm24[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm5[2,3],ymm4[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm17, %zmm17
+; AVX512F-NEXT:    vpermi2q %zmm26, %zmm7, %zmm18
+; AVX512F-NEXT:    vpermi2q %zmm11, %zmm21, %zmm19
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm19 {%k1}
+; AVX512F-NEXT:    vmovdqa 704(%rdi), %ymm4
+; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm5
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %ymm27
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %ymm28
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm27[0],ymm28[2],ymm27[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm6[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm18
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm30
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm30 {%k1}
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm26
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm20[1],ymm5[1],ymm20[3],ymm5[3]
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm15
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm23[1],ymm22[1],ymm23[3],ymm22[3]
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm13
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm6, %zmm19
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm19 {%k1}
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm20[1],ymm2[1],ymm20[3],ymm2[3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm16, %zmm22
+; AVX512F-NEXT:    vpermi2q %zmm11, %zmm21, %zmm16
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm25[1],ymm24[1],ymm25[3],ymm24[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm19, %zmm19
+; AVX512F-NEXT:    vpermi2q %zmm26, %zmm7, %zmm0
+; AVX512F-NEXT:    vpermi2q %zmm11, %zmm21, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm28[1],ymm27[1],ymm28[3],ymm27[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm20
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [4,12,4,12,4,12,4,12]
+; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm25, %zmm23
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm27 = zmm3[0],zmm12[0],zmm3[2],zmm12[2],zmm3[4],zmm12[4],zmm3[6],zmm12[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm27 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [4,12,4,12]
+; AVX512F-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm13, %zmm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm28 = zmm21[0],zmm11[0],zmm21[2],zmm11[2],zmm21[4],zmm11[4],zmm21[6],zmm11[6]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm29 = zmm3[1],zmm12[1],zmm3[3],zmm12[3],zmm3[5],zmm12[5],zmm3[7],zmm12[7]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [6,14,6,14,6,14,6,14]
 ; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm7[2,3],ymm5[2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm29
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm23, %zmm29
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm30, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm24
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm23, %zmm24
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm3
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm21[1],zmm11[1],zmm21[3],zmm11[3],zmm21[5],zmm11[5],zmm21[7],zmm11[7]
+; AVX512F-NEXT:    vpermi2q %zmm11, %zmm21, %zmm23
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm0, %zmm21
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,12]
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm5
-; AVX512F-NEXT:    vpermi2q %zmm11, %zmm28, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm27[1],ymm25[1],ymm27[3],ymm25[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm22
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12]
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm0, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm27, %zmm27
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm5
+; AVX512F-NEXT:    vpermi2q %zmm26, %zmm7, %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm28 {%k1}
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm12
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm12, %zmm13
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm11, %zmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm6
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm24 {%k1} = zmm15[0],zmm9[0],zmm15[2],zmm9[2],zmm15[4],zmm9[4],zmm15[6],zmm9[6]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm15[1],zmm9[1],zmm15[3],zmm9[3],zmm15[5],zmm9[5],zmm15[7],zmm9[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [1,9,1,9,1,9,1,9]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm6
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm1 = zmm24[0],zmm14[0],zmm24[2],zmm14[2],zmm24[4],zmm14[4],zmm24[6],zmm14[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm0, %zmm2
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [4,12,4,12]
-; AVX512F-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm5, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm1, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm28[0],zmm11[0],zmm28[2],zmm11[2],zmm28[4],zmm11[4],zmm28[6],zmm11[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm31, %zmm3, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm8, %zmm5
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm27
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [5,13,5,13,5,13,5,13]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm6, %zmm0
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512F-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm7, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [6,14,6,14,6,14,6,14]
-; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm30
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm5, %zmm30
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm1 = zmm24[1],zmm14[1],zmm24[3],zmm14[3],zmm24[5],zmm14[5],zmm24[7],zmm14[7]
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [7,15,7,15,7,15,7,15]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm2, %zmm24
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k1} = zmm13[0],zmm12[0],zmm13[2],zmm12[2],zmm13[4],zmm12[4],zmm13[6],zmm12[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm24 {%k1} = zmm13[1],zmm12[1],zmm13[3],zmm12[3],zmm13[5],zmm12[5],zmm13[7],zmm12[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm23, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm6, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm23, %zmm3
-; AVX512F-NEXT:    vpermi2q %zmm11, %zmm28, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm5, %zmm1
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm13 = zmm28[1],zmm11[1],zmm28[3],zmm11[3],zmm28[5],zmm11[5],zmm28[7],zmm11[7]
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm2, %zmm28
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k1} = zmm10[0],zmm9[0],zmm10[2],zmm9[2],zmm10[4],zmm9[4],zmm10[6],zmm9[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm28 {%k1} = zmm10[1],zmm9[1],zmm10[3],zmm9[3],zmm10[5],zmm9[5],zmm10[7],zmm9[7]
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm6, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm13 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm31, %zmm10, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm8, %zmm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm13, %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm5, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm2, %zmm15
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm6
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm25, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm29 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm9, %zmm13
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,13]
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm2, %zmm15
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm29, %zmm29
+; AVX512F-NEXT:    vpermi2q %zmm26, %zmm7, %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm4 {%k1}
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm12, %zmm9
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm11, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vpermi2q %zmm26, %zmm7, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
 ; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
 ; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm31, %zmm10, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm2, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm9, %zmm2
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [7,15,7,15]
-; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm10, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512F-NEXT:    vmovdqa 576(%rdi), %xmm11
-; AVX512F-NEXT:    vinserti128 $1, 704(%rdi), %ymm11, %ymm11
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm0, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa 512(%rdi), %xmm10
-; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm10, %ymm10
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm19, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm29 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm14 = ymm16[1],ymm21[1],ymm16[3],ymm21[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm29, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm23, %zmm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm1, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm15[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm24, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm28, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm13, 64(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, (%rsi)
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k1} = zmm7[0],zmm26[0],zmm7[2],zmm26[2],zmm7[4],zmm26[4],zmm7[6],zmm26[6]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm21 {%k1} = zmm7[1],zmm26[1],zmm7[3],zmm26[3],zmm7[5],zmm26[5],zmm7[7],zmm26[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm9, %zmm4
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [7,15,7,15]
+; AVX512F-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm7, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm13
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm12, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm7, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm8, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [7,15]
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm7, %zmm10
+; AVX512F-NEXT:    vmovdqa 576(%rdi), %xmm15
+; AVX512F-NEXT:    vinserti128 $1, 704(%rdi), %ymm15, %ymm15
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm11, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm7, %zmm11
+; AVX512F-NEXT:    vmovdqa 512(%rdi), %xmm7
+; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm7, %ymm7
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm25 = ymm7[0],ymm15[0],ymm7[2],ymm15[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm25, %zmm14, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm22 {%k1}
+; AVX512F-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm6 = ymm1[1],mem[1],ymm1[3],mem[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm22, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm7[1],ymm15[1],ymm7[3],ymm15[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm16, %zmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm24, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm23, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm3, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm21, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm14, 64(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm7, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, 64(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm14, (%rdx)
-; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 64(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, 64(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm27, 64(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm25, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 64(%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm28, 64(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm27, (%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm12, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm29, (%rax)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rax)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm3, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, (%rax)
-; AVX512F-NEXT:    addq $264, %rsp # imm = 0x108
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512F-NEXT:    popq %rax
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i64_stride8_vf16:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $264, %rsp # imm = 0x108
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm18
-; AVX512BW-NEXT:    vmovaps 640(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm31
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512BW-NEXT:    pushq %rax
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm31
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm10
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm30
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm8
 ; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm28
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm24
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm12
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm13
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [0,8,0,8,0,8,0,8]
-; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm19, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm19, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm21
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm26
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm15
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm16
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm17, %zmm16
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm18
 ; AVX512BW-NEXT:    movb $-64, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
-; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %xmm16
-; AVX512BW-NEXT:    vinserti32x4 $1, 192(%rdi), %ymm16, %ymm21
-; AVX512BW-NEXT:    vinserti32x4 $1, 128(%rdi), %ymm0, %ymm16
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm16[0],ymm21[0],ymm16[2],ymm21[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm15, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm18 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %xmm16
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %xmm19
+; AVX512BW-NEXT:    vinserti32x4 $1, 192(%rdi), %ymm19, %ymm0
+; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vinserti32x4 $1, 128(%rdi), %ymm16, %ymm4
+; AVX512BW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm19 = ymm4[0],ymm0[0],ymm4[2],ymm0[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm19, %zmm18, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm19, %zmm0
-; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm28, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k1}
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,2,10,2,10,2,10]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm5
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm7, %zmm17
+; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm21, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm14 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm18, %zmm2
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm19, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k1}
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm2
 ; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %ymm20
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm20[0],ymm5[0],ymm20[2],ymm5[2]
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm22
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm23
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm23[0],ymm22[0],ymm23[2],ymm22[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm15, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm28, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqa 704(%rdi), %ymm1
-; AVX512BW-NEXT:    vmovdqa 640(%rdi), %ymm2
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %ymm25
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %ymm27
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm27[0],ymm25[0],ymm27[2],ymm25[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm8[2,3],ymm15[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm20[0],ymm2[0],ymm20[2],ymm2[2]
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm24
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm25
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm25[0],ymm24[0],ymm25[2],ymm24[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm5[2,3],ymm4[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm17, %zmm17
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm7, %zmm18
+; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm21, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm19 {%k1}
+; AVX512BW-NEXT:    vmovdqa 704(%rdi), %ymm4
+; AVX512BW-NEXT:    vmovdqa 640(%rdi), %ymm5
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %ymm27
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %ymm28
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm27[0],ymm28[2],ymm27[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm6[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm18
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm30
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm30
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm30 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm26
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm20[1],ymm5[1],ymm20[3],ymm5[3]
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm15
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm23[1],ymm22[1],ymm23[3],ymm22[3]
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm13
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm19
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm19 {%k1}
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm20[1],ymm2[1],ymm20[3],ymm2[3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm16, %zmm22
+; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm21, %zmm16
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm25[1],ymm24[1],ymm25[3],ymm24[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm19, %zmm19
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm7, %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm21, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm28[1],ymm27[1],ymm28[3],ymm27[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm20
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [4,12,4,12,4,12,4,12]
+; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm23
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm27 = zmm3[0],zmm12[0],zmm3[2],zmm12[2],zmm3[4],zmm12[4],zmm3[6],zmm12[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm27 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [4,12,4,12]
+; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm13, %zmm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm28 = zmm21[0],zmm11[0],zmm21[2],zmm11[2],zmm21[4],zmm11[4],zmm21[6],zmm11[6]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm29 = zmm3[1],zmm12[1],zmm3[3],zmm12[3],zmm3[5],zmm12[5],zmm3[7],zmm12[7]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [6,14,6,14,6,14,6,14]
 ; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm7[2,3],ymm5[2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm29
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm23, %zmm29
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm30, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm24
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm23, %zmm24
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm21[1],zmm11[1],zmm21[3],zmm11[3],zmm21[5],zmm11[5],zmm21[7],zmm11[7]
+; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm21, %zmm23
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm21
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,12]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm5
-; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm28, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm27[1],ymm25[1],ymm27[3],ymm25[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm22
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12]
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm0, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm27, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm5
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm7, %zmm25
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm28 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm12
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm12, %zmm13
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm11, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm6
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm24 {%k1} = zmm15[0],zmm9[0],zmm15[2],zmm9[2],zmm15[4],zmm9[4],zmm15[6],zmm9[6]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm15[1],zmm9[1],zmm15[3],zmm9[3],zmm15[5],zmm9[5],zmm15[7],zmm9[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [1,9,1,9,1,9,1,9]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm1 = zmm24[0],zmm14[0],zmm24[2],zmm14[2],zmm24[4],zmm14[4],zmm24[6],zmm14[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm2
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [4,12,4,12]
-; AVX512BW-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm5, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm1, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm28[0],zmm11[0],zmm28[2],zmm11[2],zmm28[4],zmm11[4],zmm28[6],zmm11[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm8, %zmm5
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm27
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [5,13,5,13,5,13,5,13]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm6, %zmm0
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512BW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm7, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [6,14,6,14,6,14,6,14]
-; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm30
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm5, %zmm30
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm1 = zmm24[1],zmm14[1],zmm24[3],zmm14[3],zmm24[5],zmm14[5],zmm24[7],zmm14[7]
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [7,15,7,15,7,15,7,15]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm24
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k1} = zmm13[0],zmm12[0],zmm13[2],zmm12[2],zmm13[4],zmm12[4],zmm13[6],zmm12[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm24 {%k1} = zmm13[1],zmm12[1],zmm13[3],zmm12[3],zmm13[5],zmm12[5],zmm13[7],zmm12[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm23, %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm23, %zmm3
-; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm28, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm5, %zmm1
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm13 = zmm28[1],zmm11[1],zmm28[3],zmm11[3],zmm28[5],zmm11[5],zmm28[7],zmm11[7]
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm28
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k1} = zmm10[0],zmm9[0],zmm10[2],zmm9[2],zmm10[4],zmm9[4],zmm10[6],zmm9[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm28 {%k1} = zmm10[1],zmm9[1],zmm10[3],zmm9[3],zmm10[5],zmm9[5],zmm10[7],zmm9[7]
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm13 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm10, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm8, %zmm7
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm13, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm5, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm2, %zmm15
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm6
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [5,13,5,13,5,13,5,13]
+; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm29 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm9, %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,13]
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm2, %zmm15
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm29, %zmm29
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm7, %zmm25
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm4 {%k1}
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm12, %zmm9
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm11, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm7, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
 ; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
 ; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm10, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm2, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm2
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [7,15,7,15]
-; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm10, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512BW-NEXT:    vmovdqa 576(%rdi), %xmm11
-; AVX512BW-NEXT:    vinserti128 $1, 704(%rdi), %ymm11, %ymm11
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm0, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm10, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa 512(%rdi), %xmm10
-; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm10, %ymm10
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm19, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm29 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm14 = ymm16[1],ymm21[1],ymm16[3],ymm21[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm29, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm23, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm1, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm15[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm24, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm28, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 64(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, (%rsi)
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k1} = zmm7[0],zmm26[0],zmm7[2],zmm26[2],zmm7[4],zmm26[4],zmm7[6],zmm26[6]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm21 {%k1} = zmm7[1],zmm26[1],zmm7[3],zmm26[3],zmm7[5],zmm26[5],zmm7[7],zmm26[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm9, %zmm4
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [7,15,7,15]
+; AVX512BW-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm7, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm12, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm8, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [7,15]
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm7, %zmm10
+; AVX512BW-NEXT:    vmovdqa 576(%rdi), %xmm15
+; AVX512BW-NEXT:    vinserti128 $1, 704(%rdi), %ymm15, %ymm15
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm11, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm11
+; AVX512BW-NEXT:    vmovdqa 512(%rdi), %xmm7
+; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm7, %ymm7
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm25 = ymm7[0],ymm15[0],ymm7[2],ymm15[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm25, %zmm14, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm22 {%k1}
+; AVX512BW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm6 = ymm1[1],mem[1],ymm1[3],mem[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm22, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm7[1],ymm15[1],ymm7[3],ymm15[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm16, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm24, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm23, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm3, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm21, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, 64(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm7, (%rsi)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, 64(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%rdx)
-; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 64(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 64(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, 64(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 64(%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, 64(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, (%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, (%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%rax)
-; AVX512BW-NEXT:    addq $264, %rsp # imm = 0x108
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512BW-NEXT:    popq %rax
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <128 x i64>, ptr %in.vec, align 64
@@ -4240,1031 +4252,1009 @@ define void @load_i64_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-LABEL: load_i64_stride8_vf32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $2632, %rsp # imm = 0xA48
+; AVX512F-NEXT:    subq $2696, %rsp # imm = 0xA88
 ; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm14
-; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm28
-; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm21
-; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm18
-; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm22
-; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm10
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm15
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm30
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm20
+; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm21
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm16
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm14, %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm14, %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm5, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm13
+; AVX512F-NEXT:    vpermi2q %zmm14, %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm27, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm27, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm27, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm14, %zmm2, %zmm27
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm0[0],zmm21[0],zmm0[2],zmm21[2],zmm0[4],zmm21[4],zmm0[6],zmm21[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm4[0],zmm16[0],zmm4[2],zmm16[2],zmm4[4],zmm16[4],zmm4[6],zmm16[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm2[0],zmm14[0],zmm2[2],zmm14[2],zmm2[4],zmm14[4],zmm2[6],zmm14[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm0[1],zmm21[1],zmm0[3],zmm21[3],zmm0[5],zmm21[5],zmm0[7],zmm21[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm4[1],zmm16[1],zmm4[3],zmm16[3],zmm4[5],zmm16[5],zmm4[7],zmm16[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm6, %zmm8
+; AVX512F-NEXT:    vpermi2q %zmm14, %zmm2, %zmm6
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm9 = zmm2[1],zmm14[1],zmm2[3],zmm14[3],zmm2[5],zmm14[5],zmm2[7],zmm14[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm28 = zmm3[0],zmm1[0],zmm3[2],zmm1[2],zmm3[4],zmm1[4],zmm3[6],zmm1[6]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm3[1],zmm1[1],zmm3[3],zmm1[3],zmm3[5],zmm1[5],zmm3[7],zmm1[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
 ; AVX512F-NEXT:    movb $-64, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,2,10,2,10,2,10]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm0
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm0[0],zmm2[0],zmm0[2],zmm2[2],zmm0[4],zmm2[4],zmm0[6],zmm2[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm0[1],zmm2[1],zmm0[3],zmm2[3],zmm0[5],zmm2[5],zmm0[7],zmm2[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm31, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm30, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm9
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm8, %zmm23
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [4,12,4,12,4,12,4,12]
+; AVX512F-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm29, %zmm26
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm24, %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm21
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm11 {%k1} = zmm21[0],zmm2[0],zmm21[2],zmm2[2],zmm21[4],zmm2[4],zmm21[6],zmm2[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm21[1],zmm2[1],zmm21[3],zmm2[3],zmm21[5],zmm2[5],zmm21[7],zmm2[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm31, %zmm21
+; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm30, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm8, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm29, %zmm25
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm24, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm18
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm18[0],zmm0[0],zmm18[2],zmm0[2],zmm18[4],zmm0[4],zmm18[6],zmm0[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm18[1],zmm0[1],zmm18[3],zmm0[3],zmm18[5],zmm0[5],zmm18[7],zmm0[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %ymm29
-; AVX512F-NEXT:    vmovdqa 1152(%rdi), %ymm13
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm13[0],ymm29[0],ymm13[2],ymm29[2]
-; AVX512F-NEXT:    vmovdqa 1088(%rdi), %ymm6
-; AVX512F-NEXT:    vmovdqa 1024(%rdi), %ymm4
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm9[2,3],ymm5[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm20
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm21
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm31, %zmm18
+; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm30, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqa 704(%rdi), %ymm8
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm8, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm29, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm24, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm0
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm31
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm30
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm8
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm29
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm24
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm14 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %ymm19
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %ymm18
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm18[0],ymm19[0],ymm18[2],ymm19[2]
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %ymm17
+; AVX512F-NEXT:    vmovdqa 1024(%rdi), %ymm10
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm10[0],ymm17[0],ymm10[2],ymm17[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm15[2,3],ymm9[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %ymm16
 ; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm9
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm9[0],ymm8[0],ymm9[2],ymm8[2]
-; AVX512F-NEXT:    vmovdqa 576(%rdi), %ymm12
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %ymm16
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm16[0],ymm12[0],ymm16[2],ymm12[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm11[2,3],ymm10[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm5, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm10 {%k1}
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm9[0],ymm16[0],ymm9[2],ymm16[2]
+; AVX512F-NEXT:    vmovdqa 576(%rdi), %ymm11
+; AVX512F-NEXT:    vmovdqa 512(%rdi), %ymm7
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm14 = ymm7[0],ymm11[0],ymm7[2],ymm11[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm12, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
 ; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm5
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm25
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm25[0],ymm5[0],ymm25[2],ymm5[2]
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm26
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm27
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm27[0],ymm26[0],ymm27[2],ymm26[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm15 = ymm15[2,3],ymm11[2,3]
-; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm15, %zmm10, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm0, %zmm10
-; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512F-NEXT:    vpermi2q %zmm14, %zmm23, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqa 1728(%rdi), %ymm10
-; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %ymm28
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm28[0],ymm10[0],ymm28[2],ymm10[2]
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %ymm31
-; AVX512F-NEXT:    vmovdqa 1536(%rdi), %ymm2
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm14 = ymm2[0],ymm31[0],ymm2[2],ymm31[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm14[2,3],ymm1[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm14
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm14[0],ymm5[0],ymm14[2],ymm5[2]
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm6
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm15[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm13[1],ymm29[1],ymm13[3],ymm29[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],ymm1[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm14, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm29
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm9[1],ymm8[1],ymm9[3],ymm8[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm16[1],ymm12[1],ymm16[3],ymm12[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm25[1],ymm5[1],ymm25[3],ymm5[3]
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm5
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm27[1],ymm26[1],ymm27[3],ymm26[3]
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm8
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],ymm1[2,3]
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm9
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm11, %zmm23, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm28[1],ymm10[1],ymm28[3],ymm10[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],ymm31[1],ymm2[3],ymm31[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0 {%k1}
+; AVX512F-NEXT:    vmovdqa 1728(%rdi), %ymm4
+; AVX512F-NEXT:    vmovdqa 1664(%rdi), %ymm13
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm13[0],ymm4[0],ymm13[2],ymm4[2]
+; AVX512F-NEXT:    vmovdqa 1600(%rdi), %ymm3
+; AVX512F-NEXT:    vmovdqa 1536(%rdi), %ymm1
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm12[2,3],ymm15[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm0, %zmm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm18[0],zmm21[0],zmm18[2],zmm21[2],zmm18[4],zmm21[4],zmm18[6],zmm21[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,12,4,12]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm1, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm0, %zmm13
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm6[0],zmm29[0],zmm6[2],zmm29[2],zmm6[4],zmm29[4],zmm6[6],zmm29[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm22
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm1, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm20
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm14[0],zmm30[2],zmm14[2],zmm30[4],zmm14[4],zmm30[6],zmm14[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm1, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm28
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm7[0],zmm11[0],zmm7[2],zmm11[2],zmm7[4],zmm11[4],zmm7[6],zmm11[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm0
-; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm0 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm18[1],ymm19[1],ymm18[3],ymm19[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm10 = ymm10[1],ymm17[1],ymm10[3],ymm17[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm10[2,3],ymm12[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [0,8,0,8,0,8,0,8]
-; AVX512F-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm31, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm0 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm7[1],ymm11[1],ymm7[3],ymm11[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [1,9,1,9,1,9,1,9]
-; AVX512F-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm24, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm14[1],ymm5[1],ymm14[3],ymm5[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],ymm6[1],ymm2[3],ymm6[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm2[2,3],ymm5[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [6,14,6,14,6,14,6,14]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm4, %zmm3
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm18[1],zmm21[1],zmm18[3],zmm21[3],zmm18[5],zmm21[5],zmm18[7],zmm21[7]
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [7,15,7,15,7,15,7,15]
-; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm11, %zmm18
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm25[0],zmm17[0],zmm25[2],zmm17[2],zmm25[4],zmm17[4],zmm25[6],zmm17[6]
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm18 {%k1} = zmm25[1],zmm17[1],zmm25[3],zmm17[3],zmm25[5],zmm17[5],zmm25[7],zmm17[7]
-; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm31, %zmm25
-; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm24, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [5,13,5,13,5,13,5,13]
-; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm5, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm5, %zmm3
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,5,13]
-; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm9, %zmm10
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm27 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm13[1],ymm4[1],ymm13[3],ymm4[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm3[1],ymm1[3],ymm3[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm27, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm31, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm20
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm24, %zmm20
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm4, %zmm16
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm15 = zmm2[1],zmm29[1],zmm2[3],zmm29[3],zmm2[5],zmm29[5],zmm2[7],zmm29[7]
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm11, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm26 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm16 {%k1} = zmm26[0],zmm0[0],zmm26[2],zmm0[2],zmm26[4],zmm0[4],zmm26[6],zmm0[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm26[1],zmm0[1],zmm26[3],zmm0[3],zmm26[5],zmm0[5],zmm26[7],zmm0[7]
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm21
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm31, %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm24, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm21
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm29
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm9, %zmm10
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %ymm1, %ymm27
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm31, %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm24, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm19
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm13 = zmm30[1],zmm6[1],zmm30[3],zmm6[3],zmm30[5],zmm6[5],zmm30[7],zmm6[7]
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm11, %zmm30
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm19 {%k1} = zmm22[0],zmm1[0],zmm22[2],zmm1[2],zmm22[4],zmm1[4],zmm22[6],zmm1[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm30 {%k1} = zmm22[1],zmm1[1],zmm22[3],zmm1[3],zmm22[5],zmm1[5],zmm22[7],zmm1[7]
-; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm31, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm24, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm25
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm31, %zmm25
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm31
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm28
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm24, %zmm28
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm3, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm8 = zmm3[1],zmm2[1],zmm3[3],zmm2[3],zmm3[5],zmm2[5],zmm3[7],zmm2[7]
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k1} = zmm7[0],zmm1[0],zmm7[2],zmm1[2],zmm7[4],zmm1[4],zmm7[6],zmm1[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm7[1],zmm1[1],zmm7[3],zmm1[3],zmm7[5],zmm1[5],zmm7[7],zmm1[7]
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm28 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm19
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm12
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,12,4,12]
+; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm27
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,12]
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm4, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm11, %zmm14
-; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm7, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm11, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm0, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm28, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm14, %zmm0, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm10
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm25
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm0, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm14
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm15
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm16
+; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm11
+; AVX512F-NEXT:    vpermi2q %zmm16, %zmm11, %zmm2
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm17
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm3
+; AVX512F-NEXT:    vpermi2q %zmm17, %zmm3, %zmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm2, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [5,13]
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm0, %zmm7
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm9, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm2, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm6, %zmm7
-; AVX512F-NEXT:    vpermi2q %zmm14, %zmm0, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm0, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [6,14,6,14]
+; AVX512F-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm8, %zmm5
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} ymm18 = [7,15,7,15]
+; AVX512F-NEXT:    # ymm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm18, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm11, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm11, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm15 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm13 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm2, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm8, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm18, %zmm13
+; AVX512F-NEXT:    vpermi2q %zmm16, %zmm11, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm8, %zmm12
+; AVX512F-NEXT:    vpermi2q %zmm16, %zmm11, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm18, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm18, %zmm14
+; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm9, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm13, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8 {%k1}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm15, %zmm6, %zmm9
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm8, %zmm30
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,6,14]
-; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm2, %zmm5
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,7,15]
-; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm2, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm9
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm8
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm2, %zmm8
-; AVX512F-NEXT:    vpermi2q %zmm15, %zmm6, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm21
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm16, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm12 {%k1}
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm4, %ymm4
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512F-NEXT:    vinserti128 $1, 128(%rdi), %ymm5, %ymm5
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm12, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm18 {%k1}
-; AVX512F-NEXT:    vmovdqa 576(%rdi), %xmm8
-; AVX512F-NEXT:    vinserti128 $1, 704(%rdi), %ymm8, %ymm8
-; AVX512F-NEXT:    vmovdqa 512(%rdi), %xmm10
-; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm10, %ymm10
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm18, %zmm11
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
-; AVX512F-NEXT:    vmovdqa 1088(%rdi), %xmm12
-; AVX512F-NEXT:    vinserti128 $1, 1216(%rdi), %ymm12, %ymm12
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %xmm16
-; AVX512F-NEXT:    vinserti32x4 $1, 1152(%rdi), %ymm16, %ymm16
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm22 = ymm16[0],ymm12[0],ymm16[2],ymm12[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm22, %zmm13, %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm31 {%k1}
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %xmm25
-; AVX512F-NEXT:    vinserti32x4 $1, 1728(%rdi), %ymm25, %ymm25
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %xmm26
-; AVX512F-NEXT:    vinserti32x4 $1, 1664(%rdi), %ymm26, %ymm26
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm26[0],ymm25[0],ymm26[2],ymm25[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm27, %zmm31, %zmm27
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm14 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm14, %zmm29
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm13 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm16[1],ymm12[1],ymm16[3],ymm12[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm13, %zmm12
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm20 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm10[1],ymm8[1],ymm10[3],ymm8[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm20, %zmm8
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm17 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm17, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm24 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm26[1],ymm25[1],ymm26[3],ymm25[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm24, %zmm5
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm19, %zmm1
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm23, %zmm2
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm10 = mem[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm10, %zmm13, %zmm10
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm13 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm13 = mem[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm9, %zmm13
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm14 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm14 = mem[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm3, %zmm14
-; AVX512F-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm15 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm15 = mem[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm14, %zmm19
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k1}
+; AVX512F-NEXT:    vpermi2q %zmm17, %zmm3, %zmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm14
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = [6,14]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm10, %zmm9
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm16 = [7,15]
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm16, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm10, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm16, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm10, %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm17, %zmm3, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm16, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm16, %zmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm15, %zmm15
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm9, %zmm22
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k1}
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm5
+; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm5, %ymm5
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX512F-NEXT:    vinserti128 $1, 128(%rdi), %ymm9, %ymm9
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm9[0],ymm5[0],ymm9[2],ymm5[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm16, %zmm12
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm20 {%k1}
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %xmm16
+; AVX512F-NEXT:    vinserti32x4 $1, 704(%rdi), %ymm16, %ymm16
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %xmm17
+; AVX512F-NEXT:    vinserti32x4 $1, 640(%rdi), %ymm17, %ymm17
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm18 = ymm17[0],ymm16[0],ymm17[2],ymm16[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm18, %zmm20, %zmm18
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm24 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %xmm20
+; AVX512F-NEXT:    vinserti32x4 $1, 1216(%rdi), %ymm20, %ymm20
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %xmm21
+; AVX512F-NEXT:    vinserti32x4 $1, 1152(%rdi), %ymm21, %ymm21
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm21[0],ymm20[0],ymm21[2],ymm20[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm23, %zmm24, %zmm23
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm28 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %xmm24
+; AVX512F-NEXT:    vinserti32x4 $1, 1728(%rdi), %ymm24, %ymm24
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %xmm25
+; AVX512F-NEXT:    vinserti32x4 $1, 1664(%rdi), %ymm25, %ymm25
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm24[0],ymm25[2],ymm24[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm27, %zmm28, %zmm27
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm28 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm20 = ymm21[1],ymm20[1],ymm21[3],ymm20[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm20, %zmm28, %zmm20
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm21 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm16 = ymm17[1],ymm16[1],ymm17[3],ymm16[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm16, %zmm21, %zmm16
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm9[1],ymm5[1],ymm9[3],ymm5[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm17, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm25[1],ymm24[1],ymm25[3],ymm24[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm17, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm17, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm8
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm7 = ymm7[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm10, %zmm7
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm11[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm15, %zmm3, %zmm15
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm3 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm6, %zmm3
 ; AVX512F-NEXT:    vmovdqa64 %zmm27, 192(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, 128(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 64(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, 192(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, 64(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm12, 128(%rdx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, 192(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, (%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, 64(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, 128(%rcx)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, 192(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, (%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, 64(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, 128(%r8)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm3, 192(%r9)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, (%r9)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, 64(%r9)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm4, 128(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, 128(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, 192(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, 64(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 128(%rdx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 192(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, (%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 64(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 128(%rcx)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 192(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, (%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 64(%r8)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 128(%r8)
+; AVX512F-NEXT:    vmovups (%rsp), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 192(%r9)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, (%r9)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 64(%r9)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm5, 128(%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm30, 192(%rax)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm3, (%rax)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm3, 64(%rax)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm3, 128(%rax)
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm0, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm21, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm14, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm29, 64(%rax)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, 128(%rax)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, 64(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm15, 128(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm14, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, (%rax)
-; AVX512F-NEXT:    vmovaps %zmm10, 64(%rax)
-; AVX512F-NEXT:    addq $2632, %rsp # imm = 0xA48
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512F-NEXT:    addq $2696, %rsp # imm = 0xA88
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i64_stride8_vf32:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $2632, %rsp # imm = 0xA48
+; AVX512BW-NEXT:    subq $2696, %rsp # imm = 0xA88
 ; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm28
-; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm18
-; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm15
-; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm20
-; AVX512BW-NEXT:    movb $-64, %al
-; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,2,10,2,10,2,10]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %ymm29
-; AVX512BW-NEXT:    vmovdqa 1152(%rdi), %ymm13
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm13[0],ymm29[0],ymm13[2],ymm29[2]
-; AVX512BW-NEXT:    vmovdqa 1088(%rdi), %ymm6
-; AVX512BW-NEXT:    vmovdqa 1024(%rdi), %ymm4
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm9[2,3],ymm5[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqa 704(%rdi), %ymm8
-; AVX512BW-NEXT:    vmovdqa 640(%rdi), %ymm9
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm9[0],ymm8[0],ymm9[2],ymm8[2]
-; AVX512BW-NEXT:    vmovdqa 576(%rdi), %ymm12
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %ymm16
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm16[0],ymm12[0],ymm16[2],ymm12[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm11[2,3],ymm10[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm5, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm10 {%k1}
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm5
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %ymm25
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm25[0],ymm5[0],ymm25[2],ymm5[2]
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm26
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm27
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm27[0],ymm26[0],ymm27[2],ymm26[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm15 = ymm15[2,3],ymm11[2,3]
-; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm15, %zmm10, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm23, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqa 1728(%rdi), %ymm10
-; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %ymm28
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm28[0],ymm10[0],ymm28[2],ymm10[2]
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %ymm31
-; AVX512BW-NEXT:    vmovdqa 1536(%rdi), %ymm2
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm14 = ymm2[0],ymm31[0],ymm2[2],ymm31[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm14[2,3],ymm1[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm13[1],ymm29[1],ymm13[3],ymm29[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],ymm1[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm14, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm9[1],ymm8[1],ymm9[3],ymm8[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm16[1],ymm12[1],ymm16[3],ymm12[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm25[1],ymm5[1],ymm25[3],ymm5[3]
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm5
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm27[1],ymm26[1],ymm27[3],ymm26[3]
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm8
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],ymm1[2,3]
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm9
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm11, %zmm23, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm28[1],ymm10[1],ymm28[3],ymm10[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],ymm31[1],ymm2[3],ymm31[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm25
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm18[0],zmm21[0],zmm18[2],zmm21[2],zmm18[4],zmm21[4],zmm18[6],zmm21[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,12,4,12]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm21
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm2, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm0, %zmm13
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm6[0],zmm29[0],zmm6[2],zmm29[2],zmm6[4],zmm29[4],zmm6[6],zmm29[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm20
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm30[0],zmm14[0],zmm30[2],zmm14[2],zmm30[4],zmm14[4],zmm30[6],zmm14[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm2, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm23
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm28
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm7[0],zmm11[0],zmm7[2],zmm11[2],zmm7[4],zmm11[4],zmm7[6],zmm11[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm5, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm13
+; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm2, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm27, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm27, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm27, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm2, %zmm27
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm0[0],zmm21[0],zmm0[2],zmm21[2],zmm0[4],zmm21[4],zmm0[6],zmm21[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm4[0],zmm16[0],zmm4[2],zmm16[2],zmm4[4],zmm16[4],zmm4[6],zmm16[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm2[0],zmm14[0],zmm2[2],zmm14[2],zmm2[4],zmm14[4],zmm2[6],zmm14[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm0[1],zmm21[1],zmm0[3],zmm21[3],zmm0[5],zmm21[5],zmm0[7],zmm21[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm4[1],zmm16[1],zmm4[3],zmm16[3],zmm4[5],zmm16[5],zmm4[7],zmm16[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm8
+; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm2, %zmm6
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm9 = zmm2[1],zmm14[1],zmm2[3],zmm14[3],zmm2[5],zmm14[5],zmm2[7],zmm14[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm28 = zmm3[0],zmm1[0],zmm3[2],zmm1[2],zmm3[4],zmm1[4],zmm3[6],zmm1[6]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm3[1],zmm1[1],zmm3[3],zmm1[3],zmm3[5],zmm1[5],zmm3[7],zmm1[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
+; AVX512BW-NEXT:    movb $-64, %al
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm0
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm0[0],zmm2[0],zmm0[2],zmm2[2],zmm0[4],zmm2[4],zmm0[6],zmm2[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm0[1],zmm2[1],zmm0[3],zmm2[3],zmm0[5],zmm2[5],zmm0[7],zmm2[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [0,8,0,8,0,8,0,8]
 ; AVX512BW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm31, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm31, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm23
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [4,12,4,12,4,12,4,12]
+; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm26
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [5,13,5,13,5,13,5,13]
 ; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm24, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm24, %zmm15
+; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm21
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm11 {%k1} = zmm21[0],zmm2[0],zmm21[2],zmm2[2],zmm21[4],zmm2[4],zmm21[6],zmm2[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm21[1],zmm2[1],zmm21[3],zmm2[3],zmm21[5],zmm2[5],zmm21[7],zmm2[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm25
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm31, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [6,14,6,14,6,14,6,14]
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm3
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm18[1],zmm21[1],zmm18[3],zmm21[3],zmm18[5],zmm21[5],zmm18[7],zmm21[7]
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [7,15,7,15,7,15,7,15]
-; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm11, %zmm18
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm25[0],zmm17[0],zmm25[2],zmm17[2],zmm25[4],zmm17[4],zmm25[6],zmm17[6]
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm18 {%k1} = zmm25[1],zmm17[1],zmm25[3],zmm17[3],zmm25[5],zmm17[5],zmm25[7],zmm17[7]
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm24, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm18
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm18[0],zmm0[0],zmm18[2],zmm0[2],zmm18[4],zmm0[4],zmm18[6],zmm0[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm18[1],zmm0[1],zmm18[3],zmm0[3],zmm18[5],zmm0[5],zmm18[7],zmm0[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm21
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm31, %zmm18
 ; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm31, %zmm25
-; AVX512BW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm24, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [5,13,5,13,5,13,5,13]
-; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm5, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm5, %zmm3
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,5,13]
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm9, %zmm10
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm30, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm21
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm24, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm31
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm30
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm8
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm29
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm24
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm14 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %ymm19
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %ymm18
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm18[0],ymm19[0],ymm18[2],ymm19[2]
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %ymm17
+; AVX512BW-NEXT:    vmovdqa 1024(%rdi), %ymm10
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm10[0],ymm17[0],ymm10[2],ymm17[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm15[2,3],ymm9[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %ymm16
+; AVX512BW-NEXT:    vmovdqa 640(%rdi), %ymm9
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm9[0],ymm16[0],ymm9[2],ymm16[2]
+; AVX512BW-NEXT:    vmovdqa 576(%rdi), %ymm11
+; AVX512BW-NEXT:    vmovdqa 512(%rdi), %ymm7
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm14 = ymm7[0],ymm11[0],ymm7[2],ymm11[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm12, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm5
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm14
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm14[0],ymm5[0],ymm14[2],ymm5[2]
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm6
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm15[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm31, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm20
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm24, %zmm20
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm4, %zmm16
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm15 = zmm2[1],zmm29[1],zmm2[3],zmm29[3],zmm2[5],zmm29[5],zmm2[7],zmm29[7]
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm11, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm26 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm16 {%k1} = zmm26[0],zmm0[0],zmm26[2],zmm0[2],zmm26[4],zmm0[4],zmm26[6],zmm0[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm26[1],zmm0[1],zmm26[3],zmm0[3],zmm26[5],zmm0[5],zmm26[7],zmm0[7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm21
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm31, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm24, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm29
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm10
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %ymm1, %ymm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm24, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm19
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm13 = zmm30[1],zmm6[1],zmm30[3],zmm6[3],zmm30[5],zmm6[5],zmm30[7],zmm6[7]
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm30
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm19 {%k1} = zmm22[0],zmm1[0],zmm22[2],zmm1[2],zmm22[4],zmm1[4],zmm22[6],zmm1[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm30 {%k1} = zmm22[1],zmm1[1],zmm22[3],zmm1[3],zmm22[5],zmm1[5],zmm22[7],zmm1[7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm31, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm24, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm30
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm25
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm31, %zmm25
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm31
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm28
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm24, %zmm28
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm23
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm8 = zmm3[1],zmm2[1],zmm3[3],zmm2[3],zmm3[5],zmm2[5],zmm3[7],zmm2[7]
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k1} = zmm7[0],zmm1[0],zmm7[2],zmm1[2],zmm7[4],zmm1[4],zmm7[6],zmm1[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm7[1],zmm1[1],zmm7[3],zmm1[3],zmm7[5],zmm1[5],zmm7[7],zmm1[7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm14
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqa 1728(%rdi), %ymm4
+; AVX512BW-NEXT:    vmovdqa 1664(%rdi), %ymm13
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm13[0],ymm4[0],ymm13[2],ymm4[2]
+; AVX512BW-NEXT:    vmovdqa 1600(%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 1536(%rdi), %ymm1
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm12[2,3],ymm15[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm18[1],ymm19[1],ymm18[3],ymm19[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm10 = ymm10[1],ymm17[1],ymm10[3],ymm17[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm10[2,3],ymm12[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm0, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm7
-; AVX512BW-NEXT:    vpermi2q %zmm14, %zmm0, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm11, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],ymm16[1],ymm9[3],ymm16[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm7[1],ymm11[1],ymm7[3],ymm11[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm15 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm14[1],ymm5[1],ymm14[3],ymm5[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],ymm6[1],ymm2[3],ymm6[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm2[2,3],ymm5[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm13 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm9, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm13, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm27 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm13[1],ymm4[1],ymm13[3],ymm4[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm3[1],ymm1[3],ymm3[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm27, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8 {%k1}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm15, %zmm6, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm8, %zmm30
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,6,14]
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm28 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm19
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm12
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,12,4,12]
 ; AVX512BW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm2, %zmm5
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,7,15]
-; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm27
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,12]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm28, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm8
-; AVX512BW-NEXT:    vpermi2q %zmm15, %zmm6, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm21
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm16, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm12 {%k1}
-; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm4, %ymm4
-; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512BW-NEXT:    vinserti128 $1, 128(%rdi), %ymm5, %ymm5
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm12, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm18 {%k1}
-; AVX512BW-NEXT:    vmovdqa 576(%rdi), %xmm8
-; AVX512BW-NEXT:    vinserti128 $1, 704(%rdi), %ymm8, %ymm8
-; AVX512BW-NEXT:    vmovdqa 512(%rdi), %xmm10
-; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm10, %ymm10
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm18, %zmm11
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k1}
-; AVX512BW-NEXT:    vmovdqa 1088(%rdi), %xmm12
-; AVX512BW-NEXT:    vinserti128 $1, 1216(%rdi), %ymm12, %ymm12
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %xmm16
-; AVX512BW-NEXT:    vinserti32x4 $1, 1152(%rdi), %ymm16, %ymm16
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm22 = ymm16[0],ymm12[0],ymm16[2],ymm12[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm22, %zmm13, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm31 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %xmm25
-; AVX512BW-NEXT:    vinserti32x4 $1, 1728(%rdi), %ymm25, %ymm25
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %xmm26
-; AVX512BW-NEXT:    vinserti32x4 $1, 1664(%rdi), %ymm26, %ymm26
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm26[0],ymm25[0],ymm26[2],ymm25[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm27, %zmm31, %zmm27
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm10
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm25
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm14
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm15
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm11
+; AVX512BW-NEXT:    vpermi2q %zmm16, %zmm11, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm17
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm3
+; AVX512BW-NEXT:    vpermi2q %zmm17, %zmm3, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm2, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [5,13]
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm7
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm9, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm2, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [6,14,6,14]
+; AVX512BW-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm8, %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm18 = [7,15,7,15]
+; AVX512BW-NEXT:    # ymm18 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm18, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm8, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm18, %zmm13
+; AVX512BW-NEXT:    vpermi2q %zmm16, %zmm11, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm8, %zmm12
+; AVX512BW-NEXT:    vpermi2q %zmm16, %zmm11, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm18, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm18, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm14 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm14, %zmm29
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm13 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm16[1],ymm12[1],ymm16[3],ymm12[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm13, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm20 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm10[1],ymm8[1],ymm10[3],ymm8[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm20, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm17 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm17, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm24 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm26[1],ymm25[1],ymm26[3],ymm25[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm24, %zmm5
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm19, %zmm1
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm23, %zmm2
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm10 = mem[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm10, %zmm13, %zmm10
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm13 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm13 = mem[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm9, %zmm13
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm14 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm14 = mem[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm3, %zmm14
-; AVX512BW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm15 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm15 = mem[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm14, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k1}
+; AVX512BW-NEXT:    vpermi2q %zmm17, %zmm3, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm14
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [6,14]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm10, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = [7,15]
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm16, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm10, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm16, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm10, %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm17, %zmm3, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm16, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm16, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm15, %zmm15
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm9, %zmm22
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k1}
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm5
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm5, %ymm5
+; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX512BW-NEXT:    vinserti128 $1, 128(%rdi), %ymm9, %ymm9
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm9[0],ymm5[0],ymm9[2],ymm5[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm16, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm20 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %xmm16
+; AVX512BW-NEXT:    vinserti32x4 $1, 704(%rdi), %ymm16, %ymm16
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %xmm17
+; AVX512BW-NEXT:    vinserti32x4 $1, 640(%rdi), %ymm17, %ymm17
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm18 = ymm17[0],ymm16[0],ymm17[2],ymm16[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm18, %zmm20, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm24 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %xmm20
+; AVX512BW-NEXT:    vinserti32x4 $1, 1216(%rdi), %ymm20, %ymm20
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %xmm21
+; AVX512BW-NEXT:    vinserti32x4 $1, 1152(%rdi), %ymm21, %ymm21
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm21[0],ymm20[0],ymm21[2],ymm20[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm23, %zmm24, %zmm23
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm28 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %xmm24
+; AVX512BW-NEXT:    vinserti32x4 $1, 1728(%rdi), %ymm24, %ymm24
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %xmm25
+; AVX512BW-NEXT:    vinserti32x4 $1, 1664(%rdi), %ymm25, %ymm25
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm24[0],ymm25[2],ymm24[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm27, %zmm28, %zmm27
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm28 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm20 = ymm21[1],ymm20[1],ymm21[3],ymm20[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm20, %zmm28, %zmm20
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm21 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm16 = ymm17[1],ymm16[1],ymm17[3],ymm16[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm16, %zmm21, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm9[1],ymm5[1],ymm9[3],ymm5[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm17, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm25[1],ymm24[1],ymm25[3],ymm24[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm17, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm17, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm8
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm7 = ymm7[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm10, %zmm7
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm11[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm15, %zmm3, %zmm15
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm3 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm6, %zmm3
 ; AVX512BW-NEXT:    vmovdqa64 %zmm27, 192(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 128(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 64(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 192(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, 64(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, 128(%rdx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, 192(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, (%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, 64(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, 128(%rcx)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, 192(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, (%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, 64(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, 128(%r8)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm3, 192(%r9)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, (%r9)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, 64(%r9)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm4, 128(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, 128(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, 192(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, 64(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 128(%rdx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 192(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, (%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 64(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 128(%rcx)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 192(%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, (%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 64(%r8)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 128(%r8)
+; AVX512BW-NEXT:    vmovups (%rsp), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 192(%r9)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, (%r9)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 64(%r9)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm5, 128(%r9)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, 192(%rax)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm3, (%rax)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm3, 64(%rax)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm3, 128(%rax)
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, 64(%rax)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, 128(%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, 64(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm15, 128(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%rax)
-; AVX512BW-NEXT:    vmovaps %zmm10, 64(%rax)
-; AVX512BW-NEXT:    addq $2632, %rsp # imm = 0xA48
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512BW-NEXT:    addq $2696, %rsp # imm = 0xA88
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <256 x i64>, ptr %in.vec, align 64
@@ -9123,813 +9113,828 @@ define void @load_i64_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-LABEL: load_i64_stride8_vf64:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $6600, %rsp # imm = 0x19C8
-; AVX512F-NEXT:    vmovdqa64 3392(%rdi), %zmm13
-; AVX512F-NEXT:    vmovdqa64 3328(%rdi), %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 3520(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 3456(%rdi), %zmm10
-; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm17
-; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm12
-; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    subq $6920, %rsp # imm = 0x1B08
+; AVX512F-NEXT:    vmovdqa64 3392(%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 3328(%rdi), %zmm14
+; AVX512F-NEXT:    vmovdqa64 3520(%rdi), %zmm11
+; AVX512F-NEXT:    vmovdqa64 3456(%rdi), %zmm12
+; AVX512F-NEXT:    vmovdqa64 1984(%rdi), %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 832(%rdi), %zmm8
 ; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm15
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm16
+; AVX512F-NEXT:    vmovdqa64 768(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 960(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 896(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm16
 ; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm17
+; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    movb $-64, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm29
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm2, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm20, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm5, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa 3264(%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vmovdqa 3264(%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-NEXT:    vmovdqa 3200(%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
-; AVX512F-NEXT:    vmovdqa 3136(%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa 3072(%rdi), %ymm14
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm14[0],ymm3[0],ymm14[2],ymm3[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; AVX512F-NEXT:    vmovdqa 3136(%rdi), %ymm9
+; AVX512F-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vmovdqa 3072(%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[2],ymm9[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm7
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm20, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm5, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa 704(%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa 640(%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqa 704(%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %ymm26
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %ymm23
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm26[0],ymm23[2],ymm26[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %ymm30
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm30[0],ymm0[0],ymm30[2],ymm0[2]
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %ymm28
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %ymm27
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm27[0],ymm28[0],ymm27[2],ymm28[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm20, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm5, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm0
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm30
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm30[0],ymm0[0],ymm30[2],ymm0[2]
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm20
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm16
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm16[0],ymm20[0],ymm16[2],ymm20[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %ymm25
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm23
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm23[0],ymm25[0],ymm23[2],ymm25[2]
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm21
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm22
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm22[0],ymm21[0],ymm22[2],ymm21[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vmovdqa64 1920(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm20, %zmm0
+; AVX512F-NEXT:    vmovdqa64 1856(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 1792(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm5, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa 1728(%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa 1664(%rdi), %ymm0
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %ymm21
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %ymm17
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm17[0],ymm21[0],ymm17[2],ymm21[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %ymm19
+; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %ymm18
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm18[0],ymm19[0],ymm18[2],ymm19[2]
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %ymm17
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %ymm24
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm24[0],ymm17[0],ymm24[2],ymm17[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 1472(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 1408(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
+; AVX512F-NEXT:    vmovdqa64 1344(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1280(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm5, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512F-NEXT:    vmovdqa 1216(%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %ymm25
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm25[0],ymm0[0],ymm25[2],ymm0[2]
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %ymm24
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %ymm22
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm22[0],ymm24[0],ymm22[2],ymm24[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %ymm29
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm0[0],ymm29[2],ymm0[2]
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %ymm31
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %ymm16
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm16[0],ymm31[0],ymm16[2],ymm31[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 3008(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 2944(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 2880(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
+; AVX512F-NEXT:    vmovdqa64 2880(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 2816(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm5, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa 2752(%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vmovdqa 2752(%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-NEXT:    vmovdqa 2688(%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
-; AVX512F-NEXT:    vmovdqa64 2624(%rdi), %ymm31
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; AVX512F-NEXT:    vmovdqa 2624(%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-NEXT:    vmovdqa 2560(%rdi), %ymm10
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm10[0],ymm31[0],ymm10[2],ymm31[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm10[0],ymm2[0],ymm10[2],ymm2[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 2496(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 2432(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 2368(%rdi), %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
+; AVX512F-NEXT:    vmovdqa64 2368(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 2304(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm2, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm5, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa64 2240(%rdi), %ymm28
-; AVX512F-NEXT:    vmovdqa64 2176(%rdi), %ymm19
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm19[0],ymm28[0],ymm19[2],ymm28[2]
-; AVX512F-NEXT:    vmovdqa64 2112(%rdi), %ymm27
+; AVX512F-NEXT:    vmovdqa 2240(%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2176(%rdi), %ymm26
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm26[0],ymm0[0],ymm26[2],ymm0[2]
+; AVX512F-NEXT:    vmovdqa 2112(%rdi), %ymm8
 ; AVX512F-NEXT:    vmovdqa 2048(%rdi), %ymm6
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm6[0],ymm27[0],ymm6[2],ymm27[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 4032(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 4032(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqa64 3968(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm13, %zmm1, %zmm20
+; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 3904(%rdi), %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 3968(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 3840(%rdi), %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 3904(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 3840(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqa 3776(%rdi), %ymm12
-; AVX512F-NEXT:    vmovdqa 3712(%rdi), %ymm9
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm9[0],ymm12[0],ymm9[2],ymm12[2]
-; AVX512F-NEXT:    vmovdqa 3648(%rdi), %ymm4
-; AVX512F-NEXT:    vmovdqa 3584(%rdi), %ymm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 3776(%rdi), %ymm20
+; AVX512F-NEXT:    vmovdqa 3712(%rdi), %ymm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm4[0],ymm20[0],ymm4[2],ymm20[2]
+; AVX512F-NEXT:    vmovdqa 3648(%rdi), %ymm3
+; AVX512F-NEXT:    vmovdqa 3584(%rdi), %ymm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm2, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
-; AVX512F-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm14 = ymm14[1],mem[1],ymm14[3],mem[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm14[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm15, %zmm0
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm1, %zmm12
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm5, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k1}
+; AVX512F-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm15 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm15 = ymm7[1],mem[1],ymm7[3],mem[3]
+; AVX512F-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm14 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm14 = ymm7[1],mem[1],ymm7[3],mem[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm14, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm12
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm14
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm5, %zmm14
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
-; AVX512F-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm26[1],ymm23[3],ymm26[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm2, %zmm8
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm2, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm14 {%k1}
 ; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm30, %ymm0 # 32-byte Folded Reload
 ; AVX512F-NEXT:    # ymm0 = ymm30[1],mem[1],ymm30[3],mem[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm16[1],ymm20[1],ymm16[3],ymm20[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm28[1],ymm27[3],ymm28[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm14 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm14
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm2, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm5, %zmm14
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
-; AVX512F-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm17[1],ymm21[1],ymm17[3],ymm21[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm23[1],ymm25[1],ymm23[3],ymm25[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm22[1],ymm21[1],ymm22[3],ymm21[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm14
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm2, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm14
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm5, %zmm14
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm25, %ymm0 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm0 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm22[1],ymm24[1],ymm22[3],ymm24[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm18[1],ymm19[1],ymm18[3],ymm19[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm24[1],ymm17[1],ymm24[3],ymm17[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm14
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm5, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm29, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm29[1],mem[1],ymm29[3],mem[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm16[1],ymm31[1],ymm16[3],ymm31[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm31, %zmm5, %zmm14
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
 ; AVX512F-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512F-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm10 = ymm10[1],ymm31[1],ymm10[3],ymm31[3]
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm10 = ymm10[1],mem[1],ymm10[3],mem[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm10[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm0 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm2, %zmm10
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm5, %zmm10
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm19[1],ymm28[1],ymm19[3],ymm28[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm6[1],ymm27[1],ymm6[3],ymm27[3]
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm26, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm26[1],mem[1],ymm26[3],mem[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm6[1],ymm8[1],ymm6[3],ymm8[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm6[2,3],ymm0[2,3]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm10, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm5, %zmm19, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm28
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm9[1],ymm12[1],ymm9[3],ymm12[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
+; AVX512F-NEXT:    vpermi2q %zmm13, %zmm10, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-NEXT:    vpermi2q %zmm26, %zmm13, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm4[1],ymm20[1],ymm4[3],ymm20[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm27[0],zmm12[0],zmm27[2],zmm12[2],zmm27[4],zmm12[4],zmm27[6],zmm12[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqa64 3136(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 3072(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa64 3264(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm3 = zmm3[0],mem[0],zmm3[2],mem[2],zmm3[4],mem[4],zmm3[6],mem[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
+; AVX512F-NEXT:    vmovdqa64 3264(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 3200(%rdi), %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,12,4,12]
 ; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm1, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm7[0],zmm13[0],zmm7[2],zmm13[2],zmm7[4],zmm13[4],zmm7[6],zmm13[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm1, %zmm4
+; AVX512F-NEXT:    vmovdqa64 3136(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 3072(%rdi), %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,12]
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm3, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm12
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm9[0],zmm11[0],zmm9[2],zmm11[2],zmm9[4],zmm11[4],zmm9[6],zmm11[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 704(%rdi), %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm1, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm7
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm29[0],zmm26[0],zmm29[2],zmm26[2],zmm29[4],zmm26[4],zmm29[6],zmm26[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm23
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqa64 640(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqa64 576(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 512(%rdi), %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm1, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm26
-; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm0, %zmm2
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm8[0],zmm16[0],zmm8[2],zmm16[2],zmm8[4],zmm16[4],zmm8[6],zmm16[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm30
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm1, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm9[0],zmm11[0],zmm9[2],zmm11[2],zmm9[4],zmm11[4],zmm9[6],zmm11[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm16
-; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm1, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm0, %zmm2
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm24[0],zmm15[0],zmm24[2],zmm15[2],zmm24[4],zmm15[4],zmm24[6],zmm15[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-NEXT:    vmovdqa64 2624(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2560(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqa64 2752(%rdi), %zmm14
-; AVX512F-NEXT:    vmovdqa64 2688(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm1, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm25
-; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2112(%rdi), %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm28 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm28[0],zmm30[0],zmm28[2],zmm30[2],zmm28[4],zmm30[4],zmm28[6],zmm30[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm11
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm27
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm2, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm3
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm25[0],zmm23[0],zmm25[2],zmm23[2],zmm25[4],zmm23[4],zmm25[6],zmm23[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1728(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1664(%rdi), %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm0, %zmm3
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm24[0],zmm19[0],zmm24[2],zmm19[2],zmm24[4],zmm19[4],zmm24[6],zmm19[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1216(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1152(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqa64 1088(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 1024(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm3
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm29[0],zmm31[0],zmm29[2],zmm31[2],zmm29[4],zmm31[4],zmm29[6],zmm31[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 2752(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2688(%rdi), %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm1, %zmm3
+; AVX512F-NEXT:    vmovdqa64 2624(%rdi), %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2560(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 2048(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqa64 2240(%rdi), %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 2176(%rdi), %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm31 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm4[0],zmm8[0],zmm4[2],zmm8[2],zmm4[4],zmm8[4],zmm4[6],zmm8[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512F-NEXT:    vmovdqa64 3648(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 2112(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 2048(%rdi), %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 3584(%rdi), %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm2, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm7[0],zmm14[0],zmm7[2],zmm14[2],zmm7[4],zmm14[4],zmm7[6],zmm14[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm0
 ; AVX512F-NEXT:    vmovdqa64 3776(%rdi), %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 3712(%rdi), %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm18
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm1 = zmm19[0],zmm28[0],zmm19[2],zmm28[2],zmm19[4],zmm28[4],zmm19[6],zmm28[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqa64 3648(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 3584(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm4, %zmm3, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512F-NEXT:    vpermi2q %zmm31, %zmm10, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm25
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm13[0],zmm26[0],zmm13[2],zmm26[2],zmm13[4],zmm26[4],zmm13[6],zmm26[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,13]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm27[1],zmm12[1],zmm27[3],zmm12[3],zmm27[5],zmm12[5],zmm27[7],zmm12[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm1, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm0, %zmm2
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm24[1],zmm20[1],zmm24[3],zmm20[3],zmm24[5],zmm20[5],zmm24[7],zmm20[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm3
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm23
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm31[1],zmm13[1],zmm31[3],zmm13[3],zmm31[5],zmm13[5],zmm31[7],zmm13[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm0, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm1, %zmm5
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm4 # 64-byte Folded Reload
-; AVX512F-NEXT:    # zmm4 = zmm29[1],mem[1],zmm29[3],mem[3],zmm29[5],mem[5],zmm29[7],mem[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm1, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm29, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm18, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm4 # 64-byte Folded Reload
-; AVX512F-NEXT:    # zmm4 = zmm17[1],mem[1],zmm17[3],mem[3],zmm17[5],mem[5],zmm17[7],mem[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm2
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm5 # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm5 = zmm17[1],mem[1],zmm17[3],mem[3],zmm17[5],mem[5],zmm17[7],mem[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm11, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm12 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm19
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm28[1],zmm30[1],zmm28[3],zmm30[3],zmm28[5],zmm30[5],zmm28[7],zmm30[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm8
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm5 = zmm5[1],mem[1],zmm5[3],mem[3],zmm5[5],mem[5],zmm5[7],mem[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm29
-; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm1, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm16
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm9[1],zmm11[1],zmm9[3],zmm11[3],zmm9[5],zmm11[5],zmm9[7],zmm11[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm5 = zmm5[1],mem[1],zmm5[3],mem[3],zmm5[5],mem[5],zmm5[7],mem[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm9
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm1, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm5 # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm5 = zmm11[1],mem[1],zmm11[3],mem[3],zmm11[5],mem[5],zmm11[7],mem[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
-; AVX512F-NEXT:    # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm1, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm8[1],zmm4[3],zmm8[3],zmm4[5],zmm8[5],zmm4[7],zmm8[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm5 # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm5 = zmm7[1],mem[1],zmm7[3],mem[3],zmm7[5],mem[5],zmm7[7],mem[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-NEXT:    vpermi2q %zmm6, %zmm3, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm5, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm1 = zmm25[1],zmm18[1],zmm25[3],zmm18[3],zmm25[5],zmm18[5],zmm25[7],zmm18[7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm3, %zmm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-NEXT:    vpermi2q %zmm31, %zmm7, %zmm2
+; AVX512F-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm1 = zmm25[1],mem[1],zmm25[3],mem[3],zmm25[5],mem[5],zmm25[7],mem[7]
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm3 {%k1} # 64-byte Folded Reload
-; AVX512F-NEXT:    # zmm3 {%k1} = zmm27[0],mem[0],zmm27[2],mem[2],zmm27[4],mem[4],zmm27[6],mem[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [6,14,6,14]
+; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,14]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm0, %zmm31
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm24, %zmm31 {%k1} # 64-byte Folded Reload
-; AVX512F-NEXT:    # zmm31 {%k1} = zmm24[0],mem[0],zmm24[2],mem[2],zmm24[4],mem[4],zmm24[6],mem[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm31, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm0, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm1, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [6,14,6,14,6,14,6,14]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm23
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm4 {%k1} # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm4 {%k1} = zmm20[0],mem[0],zmm20[2],mem[2],zmm20[4],mem[4],zmm20[6],mem[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q %zmm22, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm14[0],zmm28[0],zmm14[2],zmm28[2],zmm14[4],zmm28[4],zmm14[6],zmm28[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm30, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm29, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm24
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm2, %zmm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm21[0],zmm18[0],zmm21[2],zmm18[2],zmm21[4],zmm18[4],zmm21[6],zmm18[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm1, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm18
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm4 {%k1} # 64-byte Folded Reload
+; AVX512F-NEXT:    # zmm4 {%k1} = zmm17[0],mem[0],zmm17[2],mem[2],zmm17[4],mem[4],zmm17[6],mem[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm30[0],zmm27[0],zmm30[2],zmm27[2],zmm30[4],zmm27[4],zmm30[6],zmm27[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm1, %zmm15
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm14[0],zmm30[0],zmm14[2],zmm30[2],zmm14[4],zmm30[4],zmm14[6],zmm30[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm16, %zmm0, %zmm15
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm15[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm31[0],zmm16[0],zmm31[2],zmm16[2],zmm31[4],zmm16[4],zmm31[6],zmm16[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm11
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm13 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm14[0],zmm21[0],zmm14[2],zmm21[2],zmm14[4],zmm21[4],zmm14[6],zmm21[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm1, %zmm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm28[0],zmm22[0],zmm28[2],zmm22[2],zmm28[4],zmm22[4],zmm28[6],zmm22[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm1, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm3 {%k1} # 64-byte Folded Reload
-; AVX512F-NEXT:    # zmm3 {%k1} = zmm12[0],mem[0],zmm12[2],mem[2],zmm12[4],mem[4],zmm12[6],mem[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm10, %zmm1, %zmm9
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm7
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm21[0],zmm26[2],zmm21[2],zmm26[4],zmm21[4],zmm26[6],zmm21[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm8
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm10[0],zmm12[0],zmm10[2],zmm12[2],zmm10[4],zmm12[4],zmm10[6],zmm12[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vpermi2q %zmm6, %zmm14, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm3, %zmm0
-; AVX512F-NEXT:    vpermi2q %zmm4, %zmm5, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm3, %zmm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k1} = zmm9[0],zmm5[0],zmm9[2],zmm5[2],zmm9[4],zmm5[4],zmm9[6],zmm5[6]
+; AVX512F-NEXT:    vpermi2q %zmm9, %zmm6, %zmm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k1} = zmm7[0],zmm31[0],zmm7[2],zmm31[2],zmm7[4],zmm31[4],zmm7[6],zmm31[6]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm17, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,1,9,1,9,1,9]
 ; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm23, %zmm4, %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm3[1],zmm24[3],zmm3[3],zmm24[5],zmm3[5],zmm24[7],zmm3[7]
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm1
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [0,8,0,8,0,8,0,8]
-; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm14, %zmm24
-; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [1,9,1,9,1,9,1,9]
-; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm23, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm14, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm23, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm15[1],zmm28[1],zmm15[3],zmm28[3],zmm15[5],zmm28[5],zmm15[7],zmm28[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm4, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm14, %zmm15
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm23, %zmm6
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm17, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm4, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm0, %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm27, %zmm17, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm27
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm17, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm15, %zmm0, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm24
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm17, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm0, %zmm24
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm17, %zmm29
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm0, %zmm20
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm31
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm17, %zmm31
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm25
+; AVX512F-NEXT:    vpermi2q %zmm9, %zmm6, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm19
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm4, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm14, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm23, %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm25
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm19
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm19 {%k1} = zmm9[1],zmm0[1],zmm9[3],zmm0[3],zmm9[5],zmm0[5],zmm9[7],zmm0[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm29
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm4, %zmm19
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm19 {%k1} = zmm30[1],zmm27[1],zmm30[3],zmm27[3],zmm30[5],zmm27[5],zmm30[7],zmm27[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm14, %zmm30
-; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm23, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm23[1],zmm0[1],zmm23[3],zmm0[3],zmm23[5],zmm0[5],zmm23[7],zmm0[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm23
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm14, %zmm29
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm23, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm28
-; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm20
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm20 {%k1} = zmm31[1],zmm16[1],zmm31[3],zmm16[3],zmm31[5],zmm16[5],zmm31[7],zmm16[7]
-; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm14, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm23, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm14, %zmm28
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm23, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm31
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm4, %zmm11
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm11 {%k1} = zmm3[1],zmm0[1],zmm3[3],zmm0[3],zmm3[5],zmm0[5],zmm3[7],zmm0[7]
-; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm23, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm14, %zmm21
-; AVX512F-NEXT:    vpermt2q %zmm13, %zmm23, %zmm31
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm26[1],zmm8[1],zmm26[3],zmm8[3],zmm26[5],zmm8[5],zmm26[7],zmm8[7]
-; AVX512F-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm14, %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm23, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm14, %zmm20
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm23, %zmm25
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm14, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm18 {%k1} = zmm3[1],zmm0[1],zmm3[3],zmm0[3],zmm3[5],zmm0[5],zmm3[7],zmm0[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm18, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm16
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm14, %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm0, %zmm14
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm23, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm23, %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm23, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm18, %zmm0, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm4, %zmm0
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm9[1],zmm5[1],zmm9[3],zmm5[3],zmm9[5],zmm5[5],zmm9[7],zmm5[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm4, %zmm30
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm30 {%k1} = zmm6[1],zmm3[1],zmm6[3],zmm3[3],zmm6[5],zmm3[5],zmm6[7],zmm3[7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm7 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm8 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm10 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm11 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm12 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm13 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm27 {%k1} = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm30
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm26 {%k1} = zmm21[1],zmm0[1],zmm21[3],zmm0[3],zmm21[5],zmm0[5],zmm21[7],zmm0[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm24 {%k1} = zmm28[1],zmm22[1],zmm28[3],zmm22[3],zmm28[5],zmm22[5],zmm28[7],zmm22[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm15, %zmm28
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm16, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm20 {%k1} = zmm10[1],zmm12[1],zmm10[3],zmm12[3],zmm10[5],zmm12[5],zmm10[7],zmm12[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm15, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm12, %zmm16, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm15 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm15
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm16
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm25 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,7,15]
 ; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [7,15]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
@@ -9942,160 +9947,155 @@ define void @load_i64_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 64-byte Folded Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
-; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm18, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm27, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm0
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm26, %zmm12
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm13
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm6[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm8[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm25, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm14
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k1}
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm0
-; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm15
-; AVX512F-NEXT:    vinserti128 $1, 128(%rdi), %ymm15, %ymm15
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm30 = ymm15[0],ymm0[0],ymm15[2],ymm0[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm30, %zmm24, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm3
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-NEXT:    vinserti128 $1, 128(%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm27, %zmm1, %zmm18
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
-; AVX512F-NEXT:    vmovdqa64 576(%rdi), %xmm24
-; AVX512F-NEXT:    vinserti32x4 $1, 704(%rdi), %ymm24, %ymm30
-; AVX512F-NEXT:    vmovdqa64 512(%rdi), %xmm24
-; AVX512F-NEXT:    vinserti32x4 $1, 640(%rdi), %ymm24, %ymm24
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm24[0],ymm30[0],ymm24[2],ymm30[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm27, %zmm1, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k1}
-; AVX512F-NEXT:    vmovdqa 1088(%rdi), %xmm9
-; AVX512F-NEXT:    vinserti32x4 $1, 1216(%rdi), %ymm9, %ymm27
-; AVX512F-NEXT:    vmovdqa 1024(%rdi), %xmm9
-; AVX512F-NEXT:    vinserti128 $1, 1152(%rdi), %ymm9, %ymm9
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm9[0],ymm27[0],ymm9[2],ymm27[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm28, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa 576(%rdi), %xmm7
+; AVX512F-NEXT:    vinserti32x4 $1, 704(%rdi), %ymm7, %ymm27
+; AVX512F-NEXT:    vmovdqa 512(%rdi), %xmm7
+; AVX512F-NEXT:    vinserti128 $1, 640(%rdi), %ymm7, %ymm7
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm20 = ymm7[0],ymm27[0],ymm7[2],ymm27[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm20, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k1}
-; AVX512F-NEXT:    vmovdqa 1600(%rdi), %xmm4
-; AVX512F-NEXT:    vinserti128 $1, 1728(%rdi), %ymm4, %ymm4
-; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %xmm17
-; AVX512F-NEXT:    vinserti32x4 $1, 1664(%rdi), %ymm17, %ymm17
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm19 = ymm17[0],ymm4[0],ymm17[2],ymm4[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm19, %zmm29, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k1}
-; AVX512F-NEXT:    vmovdqa64 2112(%rdi), %xmm19
-; AVX512F-NEXT:    vinserti32x4 $1, 2240(%rdi), %ymm19, %ymm19
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa 1088(%rdi), %xmm4
+; AVX512F-NEXT:    vinserti32x4 $1, 1216(%rdi), %ymm4, %ymm20
+; AVX512F-NEXT:    vmovdqa 1024(%rdi), %xmm4
+; AVX512F-NEXT:    vinserti128 $1, 1152(%rdi), %ymm4, %ymm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm4[0],ymm20[0],ymm4[2],ymm20[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm23, %zmm1, %zmm21
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa64 1600(%rdi), %xmm23
+; AVX512F-NEXT:    vinserti32x4 $1, 1728(%rdi), %ymm23, %ymm23
+; AVX512F-NEXT:    vmovdqa64 1536(%rdi), %xmm30
+; AVX512F-NEXT:    vinserti32x4 $1, 1664(%rdi), %ymm30, %ymm30
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm26 = ymm30[0],ymm23[0],ymm30[2],ymm23[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm26, %zmm1, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm29 {%k1}
+; AVX512F-NEXT:    vmovdqa 2112(%rdi), %xmm6
+; AVX512F-NEXT:    vinserti128 $1, 2240(%rdi), %ymm6, %ymm6
 ; AVX512F-NEXT:    vmovdqa64 2048(%rdi), %xmm26
 ; AVX512F-NEXT:    vinserti32x4 $1, 2176(%rdi), %ymm26, %ymm26
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm29 = ymm26[0],ymm19[0],ymm26[2],ymm19[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm29, %zmm20, %zmm5
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm21 {%k1}
-; AVX512F-NEXT:    vmovdqa 2624(%rdi), %xmm12
-; AVX512F-NEXT:    vinserti128 $1, 2752(%rdi), %ymm12, %ymm12
-; AVX512F-NEXT:    vmovdqa64 2560(%rdi), %xmm29
-; AVX512F-NEXT:    vinserti32x4 $1, 2688(%rdi), %ymm29, %ymm29
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm18 = ymm29[0],ymm12[0],ymm29[2],ymm12[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm18, %zmm21, %zmm18
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm25 = ymm26[0],ymm6[0],ymm26[2],ymm6[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm25, %zmm29, %zmm8
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k1}
-; AVX512F-NEXT:    vmovdqa 3136(%rdi), %xmm6
-; AVX512F-NEXT:    vinserti128 $1, 3264(%rdi), %ymm6, %ymm6
-; AVX512F-NEXT:    vmovdqa64 3072(%rdi), %xmm20
-; AVX512F-NEXT:    vinserti32x4 $1, 3200(%rdi), %ymm20, %ymm20
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm21 = ymm20[0],ymm6[0],ymm20[2],ymm6[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm21, %zmm16, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm28, %zmm1 {%k1}
+; AVX512F-NEXT:    vmovdqa64 2624(%rdi), %xmm25
+; AVX512F-NEXT:    vinserti32x4 $1, 2752(%rdi), %ymm25, %ymm25
+; AVX512F-NEXT:    vmovdqa64 2560(%rdi), %xmm28
+; AVX512F-NEXT:    vinserti32x4 $1, 2688(%rdi), %ymm28, %ymm28
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm28[0],ymm25[0],ymm28[2],ymm25[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm24, %zmm1, %zmm24
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k1}
-; AVX512F-NEXT:    vmovdqa 3648(%rdi), %xmm7
-; AVX512F-NEXT:    vinserti128 $1, 3776(%rdi), %ymm7, %ymm7
-; AVX512F-NEXT:    vmovdqa64 3584(%rdi), %xmm21
-; AVX512F-NEXT:    vinserti32x4 $1, 3712(%rdi), %ymm21, %ymm21
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm28 = ymm21[0],ymm7[0],ymm21[2],ymm7[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm28, %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
+; AVX512F-NEXT:    vmovdqa 3136(%rdi), %xmm9
+; AVX512F-NEXT:    vinserti128 $1, 3264(%rdi), %ymm9, %ymm9
+; AVX512F-NEXT:    vmovdqa64 3072(%rdi), %xmm29
+; AVX512F-NEXT:    vinserti32x4 $1, 3200(%rdi), %ymm29, %ymm29
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm22 = ymm29[0],ymm9[0],ymm29[2],ymm9[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm22, %zmm31, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm17 {%k1}
+; AVX512F-NEXT:    vmovdqa 3648(%rdi), %xmm1
+; AVX512F-NEXT:    vinserti128 $1, 3776(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa64 3584(%rdi), %xmm31
+; AVX512F-NEXT:    vinserti32x4 $1, 3712(%rdi), %ymm31, %ymm31
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm19 = ymm31[0],ymm1[0],ymm31[2],ymm1[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm19, %zmm17, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm29[1],ymm9[1],ymm29[3],ymm9[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm5, %zmm9
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm7[1],ymm27[1],ymm7[3],ymm27[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm5, %zmm7
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm3[1],ymm0[3],ymm3[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm22 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm20[1],ymm6[1],ymm20[3],ymm6[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm22, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm8 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm24[1],ymm30[1],ymm24[3],ymm30[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm8, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm15[1],ymm0[1],ymm15[3],ymm0[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm8, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm17[1],ymm4[1],ymm17[3],ymm4[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm8, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm9[1],ymm27[1],ymm9[3],ymm27[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm8
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm31 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm29[1],ymm12[1],ymm29[3],ymm12[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm31, %zmm9
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm25 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm10 = ymm26[1],ymm19[1],ymm26[3],ymm19[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm25, %zmm10
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm23 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm21[1],ymm7[1],ymm21[3],ymm7[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm23, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 448(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 384(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 320(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, 256(%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, 192(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm1, 128(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm1, 64(%rsi)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm1, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 448(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, 256(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, 320(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, 128(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 192(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm30[1],ymm23[1],ymm30[3],ymm23[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm20[1],ymm4[3],ymm20[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm11 = ymm28[1],ymm25[1],ymm28[3],ymm25[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm5, %zmm11
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm26[1],ymm6[1],ymm26[3],ymm6[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm31[1],ymm1[1],ymm31[3],ymm1[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 448(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, 384(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm24, 320(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, 256(%rsi)
+; AVX512F-NEXT:    vmovups (%rsp), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 192(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm21, 128(%rsi)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm2, 64(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 448(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 256(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, 320(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 192(%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 64(%rdx)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, 384(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 64(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, 384(%rdx)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 448(%rcx)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -10179,834 +10179,846 @@ define void @load_i64_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 384(%rax)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm14, 384(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 256(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 128(%rax)
+; AVX512F-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, 320(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, 128(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512F-NEXT:    addq $6600, %rsp # imm = 0x19C8
+; AVX512F-NEXT:    addq $6920, %rsp # imm = 0x1B08
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i64_stride8_vf64:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $6600, %rsp # imm = 0x19C8
-; AVX512BW-NEXT:    vmovdqa64 3392(%rdi), %zmm13
-; AVX512BW-NEXT:    vmovdqa64 3328(%rdi), %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 3520(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 3456(%rdi), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    subq $6920, %rsp # imm = 0x1B08
+; AVX512BW-NEXT:    vmovdqa64 3392(%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 3328(%rdi), %zmm14
+; AVX512BW-NEXT:    vmovdqa64 3520(%rdi), %zmm11
+; AVX512BW-NEXT:    vmovdqa64 3456(%rdi), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 1984(%rdi), %zmm15
+; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 832(%rdi), %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 768(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 960(%rdi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm15
-; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 896(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm16
 ; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm10
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm17
+; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    movb $-64, %al
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm5, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa 3264(%rdi), %ymm3
-; AVX512BW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vmovdqa 3264(%rdi), %ymm2
+; AVX512BW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-NEXT:    vmovdqa 3200(%rdi), %ymm0
 ; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
-; AVX512BW-NEXT:    vmovdqa 3136(%rdi), %ymm3
-; AVX512BW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vmovdqa 3072(%rdi), %ymm14
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm14[0],ymm3[0],ymm14[2],ymm3[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; AVX512BW-NEXT:    vmovdqa 3136(%rdi), %ymm9
+; AVX512BW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vmovdqa 3072(%rdi), %ymm2
+; AVX512BW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[2],ymm9[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa 704(%rdi), %ymm3
-; AVX512BW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vmovdqa 640(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa 704(%rdi), %ymm0
 ; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %ymm26
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %ymm23
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm23[0],ymm26[0],ymm23[2],ymm26[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %ymm30
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm30[0],ymm0[0],ymm30[2],ymm0[2]
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %ymm28
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %ymm27
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm27[0],ymm28[0],ymm27[2],ymm28[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm0
-; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %ymm30
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm30[0],ymm0[0],ymm30[2],ymm0[2]
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm20
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm16
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm16[0],ymm20[0],ymm16[2],ymm20[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %ymm25
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %ymm23
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm23[0],ymm25[0],ymm23[2],ymm25[2]
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm21
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm22
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm22[0],ymm21[0],ymm22[2],ymm21[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vmovdqa64 1920(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 1856(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 1792(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa 1728(%rdi), %ymm3
-; AVX512BW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vmovdqa 1664(%rdi), %ymm0
-; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %ymm21
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %ymm17
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm17[0],ymm21[0],ymm17[2],ymm21[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %ymm19
+; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %ymm18
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm18[0],ymm19[0],ymm18[2],ymm19[2]
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %ymm17
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %ymm24
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm24[0],ymm17[0],ymm24[2],ymm17[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 1472(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 1408(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 1344(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1280(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 1216(%rdi), %ymm0
 ; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %ymm25
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm25[0],ymm0[0],ymm25[2],ymm0[2]
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %ymm24
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %ymm22
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm22[0],ymm24[0],ymm22[2],ymm24[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %ymm29
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm29[0],ymm0[0],ymm29[2],ymm0[2]
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %ymm31
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %ymm16
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm16[0],ymm31[0],ymm16[2],ymm31[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 3008(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 2944(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 2880(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 2880(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 2816(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa 2752(%rdi), %ymm3
-; AVX512BW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vmovdqa 2752(%rdi), %ymm2
+; AVX512BW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-NEXT:    vmovdqa 2688(%rdi), %ymm0
 ; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
-; AVX512BW-NEXT:    vmovdqa64 2624(%rdi), %ymm31
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; AVX512BW-NEXT:    vmovdqa 2624(%rdi), %ymm2
+; AVX512BW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-NEXT:    vmovdqa 2560(%rdi), %ymm10
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm10[0],ymm31[0],ymm10[2],ymm31[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm10[0],ymm2[0],ymm10[2],ymm2[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 2496(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 2432(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 2368(%rdi), %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 2368(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 2304(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm2, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 2240(%rdi), %ymm28
-; AVX512BW-NEXT:    vmovdqa64 2176(%rdi), %ymm19
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm19[0],ymm28[0],ymm19[2],ymm28[2]
-; AVX512BW-NEXT:    vmovdqa64 2112(%rdi), %ymm27
+; AVX512BW-NEXT:    vmovdqa 2240(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2176(%rdi), %ymm26
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm26[0],ymm0[0],ymm26[2],ymm0[2]
+; AVX512BW-NEXT:    vmovdqa 2112(%rdi), %ymm8
 ; AVX512BW-NEXT:    vmovdqa 2048(%rdi), %ymm6
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm6[0],ymm27[0],ymm6[2],ymm27[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 4032(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 4032(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa64 3968(%rdi), %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 3968(%rdi), %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm13, %zmm1, %zmm20
+; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 3904(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 3840(%rdi), %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 3904(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 3840(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqa 3776(%rdi), %ymm12
-; AVX512BW-NEXT:    vmovdqa 3712(%rdi), %ymm9
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm9[0],ymm12[0],ymm9[2],ymm12[2]
-; AVX512BW-NEXT:    vmovdqa 3648(%rdi), %ymm4
-; AVX512BW-NEXT:    vmovdqa 3584(%rdi), %ymm1
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 3776(%rdi), %ymm20
+; AVX512BW-NEXT:    vmovdqa 3712(%rdi), %ymm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm4[0],ymm20[0],ymm4[2],ymm20[2]
+; AVX512BW-NEXT:    vmovdqa 3648(%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 3584(%rdi), %ymm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
-; AVX512BW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm14 = ymm14[1],mem[1],ymm14[3],mem[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm14[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm15, %zmm0
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm12
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm5, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm15 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm15 = ymm7[1],mem[1],ymm7[3],mem[3]
+; AVX512BW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm14 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm14 = ymm7[1],mem[1],ymm7[3],mem[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm14 = ymm14[2,3],ymm15[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm14, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm2, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm14 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm5, %zmm14
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
-; AVX512BW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm23[1],ymm26[1],ymm23[3],ymm26[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm2, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm14 {%k1}
 ; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm30, %ymm0 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm0 = ymm30[1],mem[1],ymm30[3],mem[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm16[1],ymm20[1],ymm16[3],ymm20[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm27[1],ymm28[1],ymm27[3],ymm28[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm14 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm14
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm14
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm5, %zmm14
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
-; AVX512BW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm17[1],ymm21[1],ymm17[3],ymm21[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm23[1],ymm25[1],ymm23[3],ymm25[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm22[1],ymm21[1],ymm22[3],ymm21[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm14
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm14
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm5, %zmm14
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm25, %ymm0 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm0 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm22[1],ymm24[1],ymm22[3],ymm24[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm18[1],ymm19[1],ymm18[3],ymm19[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm24[1],ymm17[1],ymm24[3],ymm17[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm2, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm5, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm29, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm29[1],mem[1],ymm29[3],mem[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm16[1],ymm31[1],ymm16[3],ymm31[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm2, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm5, %zmm14
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
 ; AVX512BW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm10 = ymm10[1],ymm31[1],ymm10[3],ymm31[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm10 = ymm10[1],mem[1],ymm10[3],mem[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm10[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm14, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm0 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm2, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm5, %zmm10
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm19[1],ymm28[1],ymm19[3],ymm28[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm6[1],ymm27[1],ymm6[3],ymm27[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm26, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm26[1],mem[1],ymm26[3],mem[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm6[1],ymm8[1],ymm6[3],ymm8[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm6[2,3],ymm0[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm10, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm19, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm28
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm9[1],ymm12[1],ymm9[3],ymm12[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
+; AVX512BW-NEXT:    vpermi2q %zmm13, %zmm10, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm13, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm4[1],ymm20[1],ymm4[3],ymm20[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [4,12,4,12,4,12,4,12]
 ; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm27[0],zmm12[0],zmm27[2],zmm12[2],zmm27[4],zmm12[4],zmm27[6],zmm12[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 3136(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 3072(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 3264(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm3 = zmm3[0],mem[0],zmm3[2],mem[2],zmm3[4],mem[4],zmm3[6],mem[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 3264(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 3200(%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,12,4,12]
 ; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm7[0],zmm13[0],zmm7[2],zmm13[2],zmm7[4],zmm13[4],zmm7[6],zmm13[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 3136(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 3072(%rdi), %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,12]
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm3, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm12
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm9[0],zmm11[0],zmm9[2],zmm11[2],zmm9[4],zmm11[4],zmm9[6],zmm11[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 704(%rdi), %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm7
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm29[0],zmm26[0],zmm29[2],zmm26[2],zmm29[4],zmm26[4],zmm29[6],zmm26[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm23
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 640(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm1, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm26
-; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm2
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm8[0],zmm16[0],zmm8[2],zmm16[2],zmm8[4],zmm16[4],zmm8[6],zmm16[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm9[0],zmm11[0],zmm9[2],zmm11[2],zmm9[4],zmm11[4],zmm9[6],zmm11[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm16
-; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm1, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm2
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm24[0],zmm15[0],zmm24[2],zmm15[2],zmm24[4],zmm15[4],zmm24[6],zmm15[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 2624(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2560(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 2752(%rdi), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 2688(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm1, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm25
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2112(%rdi), %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm28 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm28[0],zmm30[0],zmm28[2],zmm30[2],zmm28[4],zmm30[4],zmm28[6],zmm30[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm11
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm27
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm2, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm25[0],zmm23[0],zmm25[2],zmm23[2],zmm25[4],zmm23[4],zmm25[6],zmm23[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1728(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1664(%rdi), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm24[0],zmm19[0],zmm24[2],zmm19[2],zmm24[4],zmm19[4],zmm24[6],zmm19[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1216(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1152(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 1088(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 1024(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm29[0],zmm31[0],zmm29[2],zmm31[2],zmm29[4],zmm31[4],zmm29[6],zmm31[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 2752(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2688(%rdi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 2624(%rdi), %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2560(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 2048(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqa64 2240(%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 2176(%rdi), %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm31 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm4[0],zmm8[0],zmm4[2],zmm8[2],zmm4[4],zmm8[4],zmm4[6],zmm8[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 3648(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 2112(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 2048(%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 3584(%rdi), %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm7[0],zmm14[0],zmm7[2],zmm14[2],zmm7[4],zmm14[4],zmm7[6],zmm14[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 3776(%rdi), %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 3712(%rdi), %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm18
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm1 = zmm19[0],zmm28[0],zmm19[2],zmm28[2],zmm19[4],zmm28[4],zmm19[6],zmm28[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 3648(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 3584(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm10, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm25
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm13[0],zmm26[0],zmm13[2],zmm26[2],zmm13[4],zmm26[4],zmm13[6],zmm26[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [5,13,5,13,5,13,5,13]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,13]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm27[1],zmm12[1],zmm27[3],zmm12[3],zmm27[5],zmm12[5],zmm27[7],zmm12[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [5,13,5,13,5,13,5,13]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm24[1],zmm20[1],zmm24[3],zmm20[3],zmm24[5],zmm20[5],zmm24[7],zmm20[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm3
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm31[1],zmm13[1],zmm31[3],zmm13[3],zmm31[5],zmm13[5],zmm31[7],zmm13[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm5
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm4 # 64-byte Folded Reload
-; AVX512BW-NEXT:    # zmm4 = zmm29[1],mem[1],zmm29[3],mem[3],zmm29[5],mem[5],zmm29[7],mem[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm2, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm4 # 64-byte Folded Reload
-; AVX512BW-NEXT:    # zmm4 = zmm17[1],mem[1],zmm17[3],mem[3],zmm17[5],mem[5],zmm17[7],mem[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm5 # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm5 = zmm17[1],mem[1],zmm17[3],mem[3],zmm17[5],mem[5],zmm17[7],mem[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm16
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm9[1],zmm11[1],zmm9[3],zmm11[3],zmm9[5],zmm11[5],zmm9[7],zmm11[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm12 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm19
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm28[1],zmm30[1],zmm28[3],zmm30[3],zmm28[5],zmm30[5],zmm28[7],zmm30[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm5 = zmm5[1],mem[1],zmm5[3],mem[3],zmm5[5],mem[5],zmm5[7],mem[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm5 = zmm5[1],mem[1],zmm5[3],mem[3],zmm5[5],mem[5],zmm5[7],mem[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm9
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm5 # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm5 = zmm11[1],mem[1],zmm11[3],mem[3],zmm11[5],mem[5],zmm11[7],mem[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
-; AVX512BW-NEXT:    # zmm4 = zmm4[1],mem[1],zmm4[3],mem[3],zmm4[5],mem[5],zmm4[7],mem[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm4[1],zmm8[1],zmm4[3],zmm8[3],zmm4[5],zmm8[5],zmm4[7],zmm8[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm5 # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm5 = zmm7[1],mem[1],zmm7[3],mem[3],zmm7[5],mem[5],zmm7[7],mem[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm3, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm1 = zmm25[1],zmm18[1],zmm25[3],zmm18[3],zmm25[5],zmm18[5],zmm25[7],zmm18[7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm3, %zmm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm7, %zmm2
+; AVX512BW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm1 = zmm25[1],mem[1],zmm25[3],mem[3],zmm25[5],mem[5],zmm25[7],mem[7]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm3 {%k1} # 64-byte Folded Reload
-; AVX512BW-NEXT:    # zmm3 {%k1} = zmm27[0],mem[0],zmm27[2],mem[2],zmm27[4],mem[4],zmm27[6],mem[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [6,14,6,14]
+; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,14]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm31
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm24, %zmm31 {%k1} # 64-byte Folded Reload
-; AVX512BW-NEXT:    # zmm31 {%k1} = zmm24[0],mem[0],zmm24[2],mem[2],zmm24[4],mem[4],zmm24[6],mem[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm31, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [6,14,6,14,6,14,6,14]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm23
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm4 {%k1} # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm4 {%k1} = zmm20[0],mem[0],zmm20[2],mem[2],zmm20[4],mem[4],zmm20[6],mem[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm3 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm14[0],zmm28[0],zmm14[2],zmm28[2],zmm14[4],zmm28[4],zmm14[6],zmm28[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm24
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm21[0],zmm18[0],zmm21[2],zmm18[2],zmm21[4],zmm18[4],zmm21[6],zmm18[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm2, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm4 {%k1} # 64-byte Folded Reload
+; AVX512BW-NEXT:    # zmm4 {%k1} = zmm17[0],mem[0],zmm17[2],mem[2],zmm17[4],mem[4],zmm17[6],mem[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm30[0],zmm27[0],zmm30[2],zmm27[2],zmm30[4],zmm27[4],zmm30[6],zmm27[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm15
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm14[0],zmm30[0],zmm14[2],zmm30[2],zmm14[4],zmm30[4],zmm14[6],zmm30[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm15
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm15[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm31[0],zmm16[0],zmm31[2],zmm16[2],zmm31[4],zmm16[4],zmm31[6],zmm16[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm11
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm13 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm14[0],zmm21[0],zmm14[2],zmm21[2],zmm14[4],zmm21[4],zmm14[6],zmm21[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm28[0],zmm22[0],zmm28[2],zmm22[2],zmm28[4],zmm22[4],zmm28[6],zmm22[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm3 {%k1} # 64-byte Folded Reload
-; AVX512BW-NEXT:    # zmm3 {%k1} = zmm12[0],mem[0],zmm12[2],mem[2],zmm12[4],mem[4],zmm12[6],mem[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm26[0],zmm21[0],zmm26[2],zmm21[2],zmm26[4],zmm21[4],zmm26[6],zmm21[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm8
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm10[0],zmm12[0],zmm10[2],zmm12[2],zmm10[4],zmm12[4],zmm10[6],zmm12[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm14, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm3, %zmm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k1} = zmm9[0],zmm5[0],zmm9[2],zmm5[2],zmm9[4],zmm5[4],zmm9[6],zmm5[6]
+; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm6, %zmm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k1} = zmm7[0],zmm31[0],zmm7[2],zmm31[2],zmm7[4],zmm31[4],zmm7[6],zmm31[6]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm17, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,1,9,1,9,1,9]
 ; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm4, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm1 {%k1} = zmm24[1],zmm3[1],zmm24[3],zmm3[3],zmm24[5],zmm3[5],zmm24[7],zmm3[7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm1
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [0,8,0,8,0,8,0,8]
-; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm24
-; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [1,9,1,9,1,9,1,9]
-; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm23, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm15[1],zmm28[1],zmm15[3],zmm28[3],zmm15[5],zmm28[5],zmm15[7],zmm28[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm4, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm14, %zmm15
-; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm23, %zmm6
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm17, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm4, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm17, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm27
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm17, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm17, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm0, %zmm24
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm17, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm20
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm31
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm31
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm25
+; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm6, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm19
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm6
 ; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm23, %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm19 {%k1} = zmm9[1],zmm0[1],zmm9[3],zmm0[3],zmm9[5],zmm0[5],zmm9[7],zmm0[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm4, %zmm19
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm19 {%k1} = zmm30[1],zmm27[1],zmm30[3],zmm27[3],zmm30[5],zmm27[5],zmm30[7],zmm27[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm30
-; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm23, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm23[1],zmm0[1],zmm23[3],zmm0[3],zmm23[5],zmm0[5],zmm23[7],zmm0[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm23
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm14, %zmm29
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm23, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm28
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm20
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm20 {%k1} = zmm31[1],zmm16[1],zmm31[3],zmm16[3],zmm31[5],zmm16[5],zmm31[7],zmm16[7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm14, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm28
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm31
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm4, %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm11 {%k1} = zmm3[1],zmm0[1],zmm3[3],zmm0[3],zmm3[5],zmm0[5],zmm3[7],zmm0[7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm23, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm14, %zmm21
-; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm23, %zmm31
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm25
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm26[1],zmm8[1],zmm26[3],zmm8[3],zmm26[5],zmm8[5],zmm26[7],zmm8[7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm23, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm20
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm23, %zmm25
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm18 {%k1} = zmm3[1],zmm0[1],zmm3[3],zmm0[3],zmm3[5],zmm0[5],zmm3[7],zmm0[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm18, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm16
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm0, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm23, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm23, %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm23, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm18, %zmm0, %zmm23
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm4, %zmm0
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm9[1],zmm5[1],zmm9[3],zmm5[3],zmm9[5],zmm5[5],zmm9[7],zmm5[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm30
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm30 {%k1} = zmm6[1],zmm3[1],zmm6[3],zmm3[3],zmm6[5],zmm3[5],zmm6[7],zmm3[7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm7 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm8 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm10 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm11 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm12 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm13 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm27 {%k1} = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm26 {%k1} = zmm21[1],zmm0[1],zmm21[3],zmm0[3],zmm21[5],zmm0[5],zmm21[7],zmm0[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm24 {%k1} = zmm28[1],zmm22[1],zmm28[3],zmm22[3],zmm28[5],zmm22[5],zmm28[7],zmm22[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm28
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm16, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm20 {%k1} = zmm10[1],zmm12[1],zmm10[3],zmm12[3],zmm10[5],zmm12[5],zmm10[7],zmm12[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm16, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm15 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm15
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm16
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm25 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
 ; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,7,15]
 ; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [7,15]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
@@ -11019,160 +11031,155 @@ define void @load_i64_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 64-byte Folded Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm10[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm18, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm27, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm0
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm26, %zmm12
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm13
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm6[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm8[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm25, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm0
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm15
-; AVX512BW-NEXT:    vinserti128 $1, 128(%rdi), %ymm15, %ymm15
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm30 = ymm15[0],ymm0[0],ymm15[2],ymm0[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm30, %zmm24, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm3
+; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512BW-NEXT:    vinserti128 $1, 128(%rdi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm27, %zmm1, %zmm18
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 576(%rdi), %xmm24
-; AVX512BW-NEXT:    vinserti32x4 $1, 704(%rdi), %ymm24, %ymm30
-; AVX512BW-NEXT:    vmovdqa64 512(%rdi), %xmm24
-; AVX512BW-NEXT:    vinserti32x4 $1, 640(%rdi), %ymm24, %ymm24
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm24[0],ymm30[0],ymm24[2],ymm30[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm27, %zmm1, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k1}
-; AVX512BW-NEXT:    vmovdqa 1088(%rdi), %xmm9
-; AVX512BW-NEXT:    vinserti32x4 $1, 1216(%rdi), %ymm9, %ymm27
-; AVX512BW-NEXT:    vmovdqa 1024(%rdi), %xmm9
-; AVX512BW-NEXT:    vinserti128 $1, 1152(%rdi), %ymm9, %ymm9
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm9[0],ymm27[0],ymm9[2],ymm27[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm28, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa 576(%rdi), %xmm7
+; AVX512BW-NEXT:    vinserti32x4 $1, 704(%rdi), %ymm7, %ymm27
+; AVX512BW-NEXT:    vmovdqa 512(%rdi), %xmm7
+; AVX512BW-NEXT:    vinserti128 $1, 640(%rdi), %ymm7, %ymm7
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm20 = ymm7[0],ymm27[0],ymm7[2],ymm27[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm20, %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k1}
-; AVX512BW-NEXT:    vmovdqa 1600(%rdi), %xmm4
-; AVX512BW-NEXT:    vinserti128 $1, 1728(%rdi), %ymm4, %ymm4
-; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %xmm17
-; AVX512BW-NEXT:    vinserti32x4 $1, 1664(%rdi), %ymm17, %ymm17
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm19 = ymm17[0],ymm4[0],ymm17[2],ymm4[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm19, %zmm29, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 2112(%rdi), %xmm19
-; AVX512BW-NEXT:    vinserti32x4 $1, 2240(%rdi), %ymm19, %ymm19
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa 1088(%rdi), %xmm4
+; AVX512BW-NEXT:    vinserti32x4 $1, 1216(%rdi), %ymm4, %ymm20
+; AVX512BW-NEXT:    vmovdqa 1024(%rdi), %xmm4
+; AVX512BW-NEXT:    vinserti128 $1, 1152(%rdi), %ymm4, %ymm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm4[0],ymm20[0],ymm4[2],ymm20[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm23, %zmm1, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 1600(%rdi), %xmm23
+; AVX512BW-NEXT:    vinserti32x4 $1, 1728(%rdi), %ymm23, %ymm23
+; AVX512BW-NEXT:    vmovdqa64 1536(%rdi), %xmm30
+; AVX512BW-NEXT:    vinserti32x4 $1, 1664(%rdi), %ymm30, %ymm30
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm26 = ymm30[0],ymm23[0],ymm30[2],ymm23[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm26, %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm29 {%k1}
+; AVX512BW-NEXT:    vmovdqa 2112(%rdi), %xmm6
+; AVX512BW-NEXT:    vinserti128 $1, 2240(%rdi), %ymm6, %ymm6
 ; AVX512BW-NEXT:    vmovdqa64 2048(%rdi), %xmm26
 ; AVX512BW-NEXT:    vinserti32x4 $1, 2176(%rdi), %ymm26, %ymm26
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm29 = ymm26[0],ymm19[0],ymm26[2],ymm19[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm29, %zmm20, %zmm5
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm25 = ymm26[0],ymm6[0],ymm26[2],ymm6[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm25, %zmm29, %zmm8
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm21 {%k1}
-; AVX512BW-NEXT:    vmovdqa 2624(%rdi), %xmm12
-; AVX512BW-NEXT:    vinserti128 $1, 2752(%rdi), %ymm12, %ymm12
-; AVX512BW-NEXT:    vmovdqa64 2560(%rdi), %xmm29
-; AVX512BW-NEXT:    vinserti32x4 $1, 2688(%rdi), %ymm29, %ymm29
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm18 = ymm29[0],ymm12[0],ymm29[2],ymm12[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm18, %zmm21, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 2624(%rdi), %xmm25
+; AVX512BW-NEXT:    vinserti32x4 $1, 2752(%rdi), %ymm25, %ymm25
+; AVX512BW-NEXT:    vmovdqa64 2560(%rdi), %xmm28
+; AVX512BW-NEXT:    vinserti32x4 $1, 2688(%rdi), %ymm28, %ymm28
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm28[0],ymm25[0],ymm28[2],ymm25[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm24, %zmm1, %zmm24
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k1}
-; AVX512BW-NEXT:    vmovdqa 3136(%rdi), %xmm6
-; AVX512BW-NEXT:    vinserti128 $1, 3264(%rdi), %ymm6, %ymm6
-; AVX512BW-NEXT:    vmovdqa64 3072(%rdi), %xmm20
-; AVX512BW-NEXT:    vinserti32x4 $1, 3200(%rdi), %ymm20, %ymm20
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm21 = ymm20[0],ymm6[0],ymm20[2],ymm6[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm21, %zmm16, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k1}
-; AVX512BW-NEXT:    vmovdqa 3648(%rdi), %xmm7
-; AVX512BW-NEXT:    vinserti128 $1, 3776(%rdi), %ymm7, %ymm7
-; AVX512BW-NEXT:    vmovdqa64 3584(%rdi), %xmm21
-; AVX512BW-NEXT:    vinserti32x4 $1, 3712(%rdi), %ymm21, %ymm21
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm28 = ymm21[0],ymm7[0],ymm21[2],ymm7[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm28, %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
+; AVX512BW-NEXT:    vmovdqa 3136(%rdi), %xmm9
+; AVX512BW-NEXT:    vinserti128 $1, 3264(%rdi), %ymm9, %ymm9
+; AVX512BW-NEXT:    vmovdqa64 3072(%rdi), %xmm29
+; AVX512BW-NEXT:    vinserti32x4 $1, 3200(%rdi), %ymm29, %ymm29
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm22 = ymm29[0],ymm9[0],ymm29[2],ymm9[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm22, %zmm31, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm17 {%k1}
+; AVX512BW-NEXT:    vmovdqa 3648(%rdi), %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, 3776(%rdi), %ymm1, %ymm1
+; AVX512BW-NEXT:    vmovdqa64 3584(%rdi), %xmm31
+; AVX512BW-NEXT:    vinserti32x4 $1, 3712(%rdi), %ymm31, %ymm31
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm19 = ymm31[0],ymm1[0],ymm31[2],ymm1[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm19, %zmm17, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm29[1],ymm9[1],ymm29[3],ymm9[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm5, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm7[1],ymm27[1],ymm7[3],ymm27[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm5, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm3[1],ymm0[3],ymm3[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm22 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm20[1],ymm6[1],ymm20[3],ymm6[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm22, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm8 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm24[1],ymm30[1],ymm24[3],ymm30[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm8, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm15[1],ymm0[1],ymm15[3],ymm0[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm8, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm17[1],ymm4[1],ymm17[3],ymm4[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm8, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm9[1],ymm27[1],ymm9[3],ymm27[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm31 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm29[1],ymm12[1],ymm29[3],ymm12[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm31, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm25 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm10 = ymm26[1],ymm19[1],ymm26[3],ymm19[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm25, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm23 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm21[1],ymm7[1],ymm21[3],ymm7[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm23, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 448(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 384(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, 320(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 256(%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 192(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm1, 128(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm1, 64(%rsi)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm1, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 448(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, 256(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, 320(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, 128(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 192(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm30[1],ymm23[1],ymm30[3],ymm23[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm20[1],ymm4[3],ymm20[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm5, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm11 = ymm28[1],ymm25[1],ymm28[3],ymm25[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm5, %zmm11
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm26[1],ymm6[1],ymm26[3],ymm6[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm31[1],ymm1[1],ymm31[3],ymm1[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 448(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, 384(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, 320(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, 256(%rsi)
+; AVX512BW-NEXT:    vmovups (%rsp), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 192(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, 128(%rsi)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm2, 64(%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 448(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 256(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, 320(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 192(%rdx)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 64(%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 384(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, 384(%rdx)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 448(%rcx)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -11256,22 +11263,19 @@ define void @load_i64_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 384(%rax)
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, 384(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 256(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 128(%rax)
+; AVX512BW-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, 320(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, 128(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512BW-NEXT:    addq $6600, %rsp # imm = 0x19C8
+; AVX512BW-NEXT:    addq $6920, %rsp # imm = 0x1B08
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <512 x i64>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
index bac0fbe9df6f6..a4ae1fa373bf2 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
@@ -716,13 +716,11 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rsi)
-; AVX512F-NEXT:    vmovdqa %ymm4, (%rdx)
+; AVX512F-NEXT:    vmovdqa %ymm1, (%rdx)
 ; AVX512F-NEXT:    vmovdqa %ymm2, (%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -1450,18 +1448,16 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm4 = ymm4[11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7,8,9,10],ymm4[27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23,24,25,26]
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm2
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm2
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm1 = ymm7[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm7[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpternlogq $202, %zmm2, %zmm8, %zmm5
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
 ; AVX512F-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
-; AVX512F-NEXT:    vpalignr {{.*#+}} ymm2 = ymm2[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
-; AVX512F-NEXT:    vmovdqa %ymm2, 32(%rsi)
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm3 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
+; AVX512F-NEXT:    vpalignr {{.*#+}} ymm3 = ymm3[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
+; AVX512F-NEXT:    vmovdqa %ymm3, 32(%rsi)
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, (%rdx)
 ; AVX512F-NEXT:    vmovdqa %ymm4, 32(%rcx)
 ; AVX512F-NEXT:    vmovdqa %ymm1, (%rcx)
 ; AVX512F-NEXT:    vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
index e2195f1fc25a1..b033e50805164 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
@@ -944,7 +944,8 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm3
 ; AVX512F-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,4,0,4,0,4,8,12]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,4,8,12,0,4,8,12]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
 ; AVX512F-NEXT:    vpermt2d %ymm2, %ymm4, %ymm0
 ; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm2
 ; AVX512F-NEXT:    vpmovdb %zmm2, %xmm5
@@ -1940,7 +1941,8 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpshufb %ymm7, %ymm3, %ymm5
 ; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm4
 ; AVX512F-NEXT:    vpshufb %ymm7, %ymm4, %ymm6
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,4,0,4,0,4,8,12]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,4,8,12,0,4,8,12]
+; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX512F-NEXT:    vpermt2d %ymm5, %ymm1, %ymm6
 ; AVX512F-NEXT:    vpmovdb %zmm2, %xmm5
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index b2c0e00825e63..b1b374abc3c7c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -3626,30 +3626,30 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm8, %ymm22, %ymm12
 ; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm12[0,1,2,3],zmm1[0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = <u,0,0,u,8,8,u,9>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm12, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm12, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm4, %ymm23, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm23, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm14, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm4, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm2 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm2 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[2,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm14, %zmm2, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm14 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm14 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm14, %ymm22, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm2[0,1,2,3],zmm5[0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm5
@@ -3658,42 +3658,42 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm21, %zmm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm2[0],ymm9[0],ymm2[1],ymm9[1],ymm2[2],ymm9[2],ymm2[3],ymm9[3],ymm2[8],ymm9[8],ymm2[9],ymm9[9],ymm2[10],ymm9[10],ymm2[11],ymm9[11]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [1,1,1,1,10,10,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm18, %zmm7, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm18 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [8,9,20,11,12,21,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm18 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,9,20,11,12,21,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm10[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm8, %zmm0, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm8, %zmm4, %zmm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [0,9,2,3,8,5,6,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm4, %ymm21, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm11[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm21, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm11[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,u,0,1,u,10,10,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm5, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm4[0],ymm9[0],ymm4[1],ymm9[1],ymm4[2],ymm9[2],ymm4[3],ymm9[3],ymm4[8],ymm9[8],ymm4[9],ymm9[9],ymm4[10],ymm9[10],ymm4[11],ymm9[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm9[0],ymm0[1],ymm9[1],ymm0[2],ymm9[2],ymm0[3],ymm9[3],ymm0[8],ymm9[8],ymm0[9],ymm9[9],ymm0[10],ymm9[10],ymm0[11],ymm9[11]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm7, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm4 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm7, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm0 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm3, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm3, %zmm0, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm21, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm21, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm12, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm5, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm4[0,1,2,3],zmm0[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm4[0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogd $184, %zmm0, %zmm3, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -8847,7 +8847,7 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512DQ-FAST-LABEL: store_i16_stride6_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1224, %rsp # imm = 0x4C8
+; AVX512DQ-FAST-NEXT:    subq $1208, %rsp # imm = 0x4B8
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm0
@@ -8870,8 +8870,8 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %ymm6
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm6[0],ymm2[0],ymm6[1],ymm2[1],ymm6[2],ymm2[2],ymm6[3],ymm2[3],ymm6[8],ymm2[8],ymm6[9],ymm2[9],ymm6[10],ymm2[10],ymm6[11],ymm2[11]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %xmm6
@@ -8919,26 +8919,26 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [0,9,2,3,8,5,6,11]
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm19, %ymm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = <0,u,0,1,u,10,10,u>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = <0,u,0,1,u,10,10,u>
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %ymm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm5
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm5[0],ymm9[0],ymm5[1],ymm9[1],ymm5[2],ymm9[2],ymm5[3],ymm9[3],ymm5[8],ymm9[8],ymm5[9],ymm9[9],ymm5[10],ymm9[10],ymm5[11],ymm9[11]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm14
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm21
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm15
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm24
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm18, %zmm4
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm6, %zmm20, %zmm26
 ; AVX512DQ-FAST-NEXT:    vmovdqa32 %zmm4, %zmm26 {%k1}
@@ -8948,15 +8948,15 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm25, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm24
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm19, %ymm26
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %ymm11
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm11[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm23, %zmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8976,19 +8976,19 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpermt2d %ymm7, %ymm19, %ymm17
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm7
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm7[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm0
@@ -9003,27 +9003,26 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpermi2d %zmm0, %zmm20, %zmm25
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm19, %ymm20
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm14
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm14[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm9, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm5, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm9, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm5[4],ymm9[4],ymm5[5],ymm9[5],ymm5[6],ymm9[6],ymm5[7],ymm9[7],ymm5[12],ymm9[12],ymm5[13],ymm9[13],ymm5[14],ymm9[14],ymm5[15],ymm9[15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [2,1,2,3,11,11,11,11]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm29, %zmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [5,6,5,6,5,6,7,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
 ; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm27, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
@@ -9042,20 +9041,21 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm30, %ymm8
 ; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm8[0,1,2,3],zmm0[0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm0, %ymm5
 ; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
 ; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm11
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = <2,2,u,3,10,u,10,11>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm13, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm12, %ymm2
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm31, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm10
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm13, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm12, %ymm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[12],ymm13[12],ymm12[13],ymm13[13],ymm12[14],ymm13[14],ymm12[15],ymm13[15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
@@ -9064,234 +9064,231 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm3, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm3, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm6, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm9, %ymm10
 ; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm23, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm6[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm30, %ymm0
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm7, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm14, %xmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm14[0],xmm4[0],xmm14[1],xmm4[1],xmm14[2],xmm4[2],xmm14[3],xmm4[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [0,0,2,1,8,9,8,9]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm19, %zmm3
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,0,2,2,1,0,2,2]
-; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm30, %ymm1
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm2[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm8, %ymm13
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm15, %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,0,2,1,8,9,8,9]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm21, %zmm2
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [1,0,2,2,1,0,2,2]
+; AVX512DQ-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm3, %ymm19, %ymm3
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm4, %ymm4
 ; AVX512DQ-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm4, %zmm3 {%k2}
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm4, %zmm3, %zmm2 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [16,9,10,17,12,13,18,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm28, %zmm4
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm24[0],zero,xmm24[1],zero,xmm24[2],zero,xmm24[3],zero
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,1,8,3,4,9,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm5, %ymm16, %ymm3
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm5[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm4, %zmm28, %zmm3
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,1,8,3,4,9,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm16, %ymm2
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm4[0,0,2,1,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <u,0,0,u,8,8,u,9>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm18, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm18, %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm4
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm3, %zmm4 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm28, %zmm3
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm5, %ymm16, %ymm4
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm4[0,1,2,3],zmm3[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm8, %xmm21
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm18, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm8, %ymm3
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm2, %ymm19, %ymm2
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm4, %zmm2, %zmm3 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm4, %zmm28, %zmm2
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm16, %ymm3
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm3[0,1,2,3],zmm2[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm18, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm6, %ymm5
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[8],ymm3[8],ymm5[9],ymm3[9],ymm5[10],ymm3[10],ymm5[11],ymm3[11]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm6[4],ymm8[4],ymm6[5],ymm8[5],ymm6[6],ymm8[6],ymm6[7],ymm8[7],ymm6[12],ymm8[12],ymm6[13],ymm8[13],ymm6[14],ymm8[14],ymm6[15],ymm8[15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm6, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm5, %ymm4
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[8],ymm2[8],ymm4[9],ymm2[9],ymm4[10],ymm2[10],ymm4[11],ymm2[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm8[4],ymm6[4],ymm8[5],ymm6[5],ymm8[6],ymm6[6],ymm8[7],ymm6[7],ymm8[12],ymm6[12],ymm8[13],ymm6[13],ymm8[14],ymm6[14],ymm8[15],ymm6[15]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15]
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm5[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[8],ymm6[8],ymm8[9],ymm6[9],ymm8[10],ymm6[10],ymm8[11],ymm6[11]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm5, %ymm27, %ymm5
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm6, %zmm3 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm10, %ymm14
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm5
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm23, %zmm6
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm8[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm5, %ymm30, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm13, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm10, %ymm8
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm8[0],ymm5[0],ymm8[1],ymm5[1],ymm8[2],ymm5[2],ymm8[3],ymm5[3],ymm8[8],ymm5[8],ymm8[9],ymm5[9],ymm8[10],ymm5[10],ymm8[11],ymm5[11]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm10[4],ymm13[4],ymm10[5],ymm13[5],ymm10[6],ymm13[6],ymm10[7],ymm13[7],ymm10[12],ymm13[12],ymm10[13],ymm13[13],ymm10[14],ymm13[14],ymm10[15],ymm13[15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm5
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm3[0,1,2,3],zmm6[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm3, %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm15
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm31, %zmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm4, %ymm27, %ymm4
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm4, %zmm5, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm9, %ymm12
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm6, %ymm4
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm4, %zmm23, %zmm5
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm6[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm30, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm11[4],ymm6[4],ymm11[5],ymm6[5],ymm11[6],ymm6[6],ymm11[7],ymm6[7],ymm11[12],ymm6[12],ymm11[13],ymm6[13],ymm11[14],ymm6[14],ymm11[15],ymm6[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm27, %ymm8
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm11, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm9, %ymm6
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[8],ymm4[8],ymm6[9],ymm4[9],ymm6[10],ymm4[10],ymm6[11],ymm4[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm9[4],ymm11[4],ymm9[5],ymm11[5],ymm9[6],ymm11[6],ymm9[7],ymm11[7],ymm9[12],ymm11[12],ymm9[13],ymm11[13],ymm9[14],ymm11[14],ymm9[15],ymm11[15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm4
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm2[0,1,2,3],zmm5[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm14, %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm14, %ymm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm31, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm11[4],ymm9[4],ymm11[5],ymm9[5],ymm11[6],ymm9[6],ymm11[7],ymm9[7],ymm11[12],ymm9[12],ymm11[13],ymm9[13],ymm11[14],ymm9[14],ymm11[15],ymm9[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm27, %ymm6
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm8, %zmm10, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm6, %ymm8
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm8, %zmm5, %zmm23
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm6[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm8, %ymm30, %ymm5
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm11[0],ymm9[0],ymm11[1],ymm9[1],ymm11[2],ymm9[2],ymm11[3],ymm9[3],ymm11[8],ymm9[8],ymm11[9],ymm9[9],ymm11[10],ymm9[10],ymm11[11],ymm9[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm6, %zmm9, %zmm4 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm9, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm6, %zmm4, %zmm23
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm9[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm30, %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm6, %ymm10
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm6, %ymm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm31, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm10
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm6, %ymm9
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm31, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsp), %xmm13 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm11
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm21, %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm11
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm13[0],xmm6[0],xmm13[1],xmm6[1],xmm13[2],xmm6[2],xmm13[3],xmm6[3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm19, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm11, %ymm19, %ymm11
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm12, %ymm12
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm12, %zmm11, %zmm9 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm12, %zmm28, %zmm11
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm11, %ymm0, %ymm11
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm13, %ymm13
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm13, %zmm11, %zmm10 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsp), %xmm15 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm15[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm13, %zmm28, %zmm11
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm8
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm21, %zmm12
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm8 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm14, %xmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm9
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm19, %zmm13
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm9 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm14, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm13, %ymm19, %ymm13
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm14, %ymm14
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm0, %zmm13 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm0, %zmm13, %zmm28
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm9, %ymm16, %ymm10
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm16, %ymm13
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm5[0,1,2,3],zmm23[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm10[0,1,2,3],zmm11[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm11
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm6[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm18, %zmm10
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm13[0,1,2,3],zmm28[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm11
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm6[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm18, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm9, %zmm11, %zmm13
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm13, %zmm12 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm14[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm13, %zmm12, %zmm28
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm13 = xmm14[0],zero,xmm14[1],zero,xmm14[2],zero,xmm14[3],zero
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm8, %ymm16, %ymm9
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm13, %ymm16, %ymm12
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm23[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm11[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm11, %xmm9
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm18, %zmm11
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm12[0,1,2,3],zmm28[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm13, %xmm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm18, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm9, %zmm12, %zmm13
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, (%rax)
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm5, %zmm11, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm0, %zmm5, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm29, %zmm5, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm20[0,1,2,3],zmm25[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm0, %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm0 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm0 = zmm17[0,1,2,3],mem[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm0, %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm12, %zmm11, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm7 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm8, %zmm12, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm4, %zmm8, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 128(%rax)
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm5, %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm20[0,1,2,3],zmm25[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm2, %zmm4, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 256(%rax)
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm2 = zmm17[0,1,2,3],mem[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm2, %zmm4, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 448(%rax)
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm24, %zmm12, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 384(%rax)
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm7 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 512(%rax)
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm1 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 704(%rax)
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 512(%rax)
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 704(%rax)
 ; AVX512DQ-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # zmm0 = zmm26[0,1,2,3],mem[0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm0, %zmm3, %zmm1
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm0, %zmm4, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 640(%rax)
 ; AVX512DQ-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # zmm0 = zmm22[0,1,2,3],mem[0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm0, %zmm3, %zmm1
+; AVX512DQ-FAST-NEXT:    vpternlogd $184, %zmm0, %zmm4, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512DQ-FAST-NEXT:    addq $1224, %rsp # imm = 0x4C8
+; AVX512DQ-FAST-NEXT:    addq $1208, %rsp # imm = 0x4B8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index 3de19185ea053..5f8208dac6936 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -483,11 +483,11 @@ define void @store_i16_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
 ; AVX512F-FAST-NEXT:    vpermi2q %ymm3, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,2,4,6,0,2,4,6]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,2,4,0,0,2,4,0]
 ; AVX512F-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,4,5,8,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[18,19,22,23,26,27],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [1,3,5,7,1,3,5,7]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [1,3,5,0,1,3,5,0]
 ; AVX512F-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[0,1,4,5,8,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[18,19,22,23,26,27],zero,zero,zero,zero,zero,zero,zero,zero
@@ -12036,7 +12036,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512BW-LABEL: store_i16_stride7_vf64:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $136, %rsp
+; AVX512BW-NEXT:    subq $72, %rsp
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm14
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm29
@@ -12064,7 +12064,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm3 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,59,u,u,4,5,6,7,60,u,u,11,12,13,14,61,u,u,18,19,20,21,62,u,u,25,26,27,28,63,u,u>
 ; AVX512BW-NEXT:    vpermi2w %zmm30, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [0,25,57,23,55,0,0,0,26,58,24,56,0,0,0,27,0,25,57,23,55,0,0,0,26,58,24,56,0,0,0,27]
 ; AVX512BW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm0
@@ -12078,7 +12078,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm3 {%k2}
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,1,2,3,4,55,u,u,8,9,10,11,56,u,u,15,16,17,18,57,u,u,22,23,24,25,58,u,u,29,30,31>
 ; AVX512BW-NEXT:    vpermi2w %zmm30, %zmm3, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [2,34,0,32,0,0,0,3,35,1,33,0,0,0,4,36,2,34,0,32,0,0,0,3,35,1,33,0,0,0,4,36]
 ; AVX512BW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm0
@@ -12092,7 +12092,6 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm7 {%k3}
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,0,2,34,0,32,0,0,0,3,35,1,33,0,0,0,0,0,2,34,0,32,0,0,0,3,35,1,33,0,0,0]
 ; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm0
 ; AVX512BW-NEXT:    vpermt2w %zmm8, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm3
@@ -12121,7 +12120,8 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm22
 ; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [0,0,2,34,0,32,0,0,0,3,35,1,33,0,0,0,0,0,2,34,0,32,0,0,0,3,35,1,33,0,0,0]
+; AVX512BW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermt2w %zmm26, %zmm25, %zmm30
 ; AVX512BW-NEXT:    vmovdqa64 64(%rax), %zmm25
 ; AVX512BW-NEXT:    vpermt2w %zmm25, %zmm2, %zmm30
@@ -12232,10 +12232,10 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovdqu16 %zmm28, %zmm19 {%k1}
 ; AVX512BW-NEXT:    vmovdqu16 %zmm4, %zmm14 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,59,u,4,5,6,7,8,60,u,11,12,13,14,15,61,u,18,19,20,21,22,62,u,25,26,27,28,29,63,u>
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermi2w %zmm26, %zmm2, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,5,55,u,8,9,10,11,12,56,u,15,16,17,18,19,57,u,22,23,24,25,26,58,u,29,30,31>
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermi2w %zmm26, %zmm4, %zmm2
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,59,4,5,6,7,8,9,60,11,12,13,14,15,16,61,18,19,20,21,22,23,62,25,26,27,28,29,30,63]
 ; AVX512BW-NEXT:    vpermi2w %zmm25, %zmm1, %zmm4
@@ -12256,7 +12256,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, 768(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm4, 832(%rax)
-; AVX512BW-NEXT:    addq $136, %rsp
+; AVX512BW-NEXT:    addq $72, %rsp
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <64 x i16>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
index 1073c24b228ba..c167d88b9121e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
@@ -6515,13 +6515,13 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-LABEL: store_i32_stride5_vf64:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm23
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm17
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm20
 ; AVX512F-NEXT:    vmovdqa64 192(%rsi), %zmm18
 ; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm1
 ; AVX512F-NEXT:    vmovdqa64 64(%rdx), %zmm3
 ; AVX512F-NEXT:    vmovdqa64 128(%rdx), %zmm29
-; AVX512F-NEXT:    vmovdqa64 192(%rdx), %zmm15
+; AVX512F-NEXT:    vmovdqa64 192(%rdx), %zmm23
 ; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm21
 ; AVX512F-NEXT:    vmovdqa64 64(%rcx), %zmm25
 ; AVX512F-NEXT:    vmovdqa64 128(%rcx), %zmm24
@@ -6531,197 +6531,196 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
 ; AVX512F-NEXT:    vpermt2d %zmm21, %zmm28, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,16,2,18,0,1,17,3,0,16,2,18,0,1,17,3]
-; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,16,2,18,0,1,17,3,0,16,2,18,0,1,17,3]
+; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [14,30,0,13,29,15,31,0,14,30,0,13,29,15,31,0]
 ; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm4
 ; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [0,13,29,15,31,0,14,30,0,13,29,15,31,0,14,30]
-; AVX512F-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm17, %zmm2
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [0,13,29,15,31,0,14,30,0,13,29,15,31,0,14,30]
+; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm2
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm14, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [27,0,10,26,12,28,0,11,27,0,10,26,12,28,0,11]
 ; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm5
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm2, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [9,27,11,0,26,10,28,12,9,27,11,0,26,10,28,12]
-; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [9,27,11,0,26,10,28,12,9,27,11,0,26,10,28,12]
+; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm31
-; AVX512F-NEXT:    vpermt2d %zmm15, %zmm14, %zmm31
+; AVX512F-NEXT:    vpermt2d %zmm23, %zmm10, %zmm31
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [6,22,8,24,0,7,23,9,6,22,8,24,0,7,23,9]
 ; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm22
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm13, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm27
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm13, %zmm27
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,3,19,5,21,0,4,20,0,3,19,5,21,0,4,20]
 ; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm30, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm28, %zmm15
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm30, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm28, %zmm23
 ; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm0
 ; AVX512F-NEXT:    vpermt2d %zmm24, %zmm4, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm20
-; AVX512F-NEXT:    vpermt2d %zmm29, %zmm14, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512F-NEXT:    vpermt2d %zmm24, %zmm13, %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm22
+; AVX512F-NEXT:    vpermt2d %zmm29, %zmm10, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm19
+; AVX512F-NEXT:    vpermt2d %zmm24, %zmm13, %zmm19
 ; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm0
 ; AVX512F-NEXT:    vpermt2d %zmm24, %zmm30, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2d %zmm24, %zmm28, %zmm29
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm0
 ; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm24
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm14, %zmm24
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm10, %zmm24
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm26
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm6
 ; AVX512F-NEXT:    vpermt2d %zmm25, %zmm28, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [8,24,0,7,23,9,25,0,8,24,0,7,23,9,25,0]
 ; AVX512F-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermt2d %zmm25, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
 ; AVX512F-NEXT:    vpermt2d %zmm25, %zmm13, %zmm26
-; AVX512F-NEXT:    vpermt2d %zmm25, %zmm30, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm16
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm30, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm16
 ; AVX512F-NEXT:    vpermt2d %zmm18, %zmm28, %zmm16
 ; AVX512F-NEXT:    vpermi2d %zmm21, %zmm1, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm21, %zmm14
+; AVX512F-NEXT:    vpermi2d %zmm1, %zmm21, %zmm10
 ; AVX512F-NEXT:    vpermi2d %zmm21, %zmm1, %zmm13
 ; AVX512F-NEXT:    vpermt2d %zmm21, %zmm30, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [3,21,5,0,20,4,22,6,3,21,5,0,20,4,22,6]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [3,21,5,0,20,4,22,6,3,21,5,0,20,4,22,6]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm23, %zmm2, %zmm21
-; AVX512F-NEXT:    vpermt2d %zmm18, %zmm7, %zmm23
+; AVX512F-NEXT:    vpermt2d %zmm20, %zmm8, %zmm21
+; AVX512F-NEXT:    vpermt2d %zmm18, %zmm15, %zmm20
 ; AVX512F-NEXT:    vmovdqa64 128(%rsi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm18
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm17, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm11
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm5, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm18
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm14, %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm12
 ; AVX512F-NEXT:    vpermt2d %zmm0, %zmm28, %zmm12
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm30
-; AVX512F-NEXT:    vpermt2d %zmm19, %zmm2, %zmm30
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm7, %zmm19
+; AVX512F-NEXT:    vpermt2d %zmm17, %zmm8, %zmm30
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm15, %zmm17
 ; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm25
-; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm8
-; AVX512F-NEXT:    vpermt2d %zmm1, %zmm7, %zmm8
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm3, %zmm15, %zmm7
 ; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512F-NEXT:    vpermt2d %zmm0, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-NEXT:    vpermt2d %zmm0, %zmm8, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm7, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm17, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm5, %zmm10
-; AVX512F-NEXT:    vpermt2d %zmm3, %zmm28, %zmm9
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm25, %zmm17
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm25, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm25, %zmm28
-; AVX512F-NEXT:    vpermt2d %zmm25, %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm15, %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm14, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm1, %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm28, %zmm9
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm25, %zmm14
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm25, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2d %zmm3, %zmm25, %zmm28
+; AVX512F-NEXT:    vpermt2d %zmm25, %zmm8, %zmm3
 ; AVX512F-NEXT:    movw $12684, %ax # imm = 0x318C
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm7 {%k1}
 ; AVX512F-NEXT:    movw $25368, %ax # imm = 0x6318
 ; AVX512F-NEXT:    kmovw %eax, %k2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k2}
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm31 {%k1}
-; AVX512F-NEXT:    vmovdqa32 %zmm16, %zmm22 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm8 {%k2}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm31 {%k1}
+; AVX512F-NEXT:    vmovdqa32 %zmm16, %zmm27 {%k2}
 ; AVX512F-NEXT:    movw $6342, %ax # imm = 0x18C6
 ; AVX512F-NEXT:    kmovw %eax, %k3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm21 {%k3}
-; AVX512F-NEXT:    vmovdqa32 %zmm15, %zmm23 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm21 {%k3}
+; AVX512F-NEXT:    vmovdqa32 %zmm23, %zmm20 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 192(%r8), %zmm2
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [28,1,2,3,4,29,6,7,8,9,30,11,12,13,14,31]
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm5, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [28,1,2,3,4,29,6,7,8,9,30,11,12,13,14,31]
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm1, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm23
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [0,25,2,3,4,5,26,7,8,9,10,27,12,13,14,15]
 ; AVX512F-NEXT:    vpermt2d %zmm2, %zmm15, %zmm31
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [0,1,22,3,4,5,6,23,8,9,10,11,24,13,14,15]
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm16, %zmm22
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm16, %zmm27
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [0,1,2,19,4,5,6,7,20,9,10,11,12,21,14,15]
 ; AVX512F-NEXT:    vpermt2d %zmm2, %zmm25, %zmm21
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,16,5,6,7,8,17,10,11,12,13,18,15]
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm3, %zmm23
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,16,5,6,7,8,17,10,11,12,13,18,15]
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm8, %zmm20
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm18 {%k2}
-; AVX512F-NEXT:    vmovdqa32 %zmm11, %zmm20 {%k1}
-; AVX512F-NEXT:    vmovdqa32 %zmm12, %zmm27 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm11, %zmm22 {%k1}
+; AVX512F-NEXT:    vmovdqa32 %zmm12, %zmm19 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm30 {%k3}
-; AVX512F-NEXT:    vmovdqa32 %zmm29, %zmm19 {%k1}
+; AVX512F-NEXT:    vmovdqa32 %zmm29, %zmm17 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 128(%r8), %zmm2
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm5, %zmm18
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm15, %zmm20
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm16, %zmm27
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm1, %zmm18
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm15, %zmm22
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm16, %zmm19
 ; AVX512F-NEXT:    vpermt2d %zmm2, %zmm25, %zmm30
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm3, %zmm19
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm6 {%k2}
-; AVX512F-NEXT:    vmovdqa32 %zmm10, %zmm24 {%k1}
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm8, %zmm17
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm5 {%k2}
+; AVX512F-NEXT:    vmovdqa32 %zmm6, %zmm24 {%k1}
 ; AVX512F-NEXT:    vmovdqa32 %zmm9, %zmm26 {%k2}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm4 {%k3}
 ; AVX512F-NEXT:    vmovdqa64 (%r8), %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm7, %zmm0 {%k1}
-; AVX512F-NEXT:    vmovdqa64 64(%r8), %zmm7
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm3, %zmm8
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm3, %zmm0
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm5, %zmm6
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm15, %zmm24
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm16, %zmm26
-; AVX512F-NEXT:    vpermt2d %zmm7, %zmm25, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm3, %zmm17 {%k2}
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm5, %zmm17
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm3, %zmm14 {%k1}
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm15, %zmm14
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
+; AVX512F-NEXT:    vmovdqa64 64(%r8), %zmm6
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm8, %zmm7
+; AVX512F-NEXT:    vpermt2d %zmm6, %zmm8, %zmm0
+; AVX512F-NEXT:    vpermt2d %zmm6, %zmm1, %zmm5
+; AVX512F-NEXT:    vpermt2d %zmm6, %zmm15, %zmm24
+; AVX512F-NEXT:    vpermt2d %zmm6, %zmm16, %zmm26
+; AVX512F-NEXT:    vpermt2d %zmm6, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm6, %zmm14 {%k2}
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm1, %zmm14
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm10 {%k1}
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm15, %zmm10
 ; AVX512F-NEXT:    vmovdqa32 %zmm28, %zmm13 {%k2}
 ; AVX512F-NEXT:    vpermt2d %zmm2, %zmm16, %zmm13
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa32 %zmm3, %zmm1 {%k3}
-; AVX512F-NEXT:    vpermt2d %zmm2, %zmm25, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%r9)
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa32 %zmm1, %zmm3 {%k3}
+; AVX512F-NEXT:    vpermt2d %zmm2, %zmm25, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 64(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm13, 128(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm14, 192(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, 256(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, 192(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm14, 256(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, 320(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, 384(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm26, 448(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm24, 512(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 576(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm19, 640(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 576(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, 640(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm30, 704(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm27, 768(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, 832(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, 768(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm22, 832(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm18, 896(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm23, 960(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 960(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm21, 1024(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm27, 1088(%r9)
 ; AVX512F-NEXT:    vmovdqa64 %zmm31, 1152(%r9)
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovaps %zmm0, 1216(%r9)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, 1216(%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, (%r9)
 ; AVX512F-NEXT:    addq $712, %rsp # imm = 0x2C8
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -6729,13 +6728,13 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-LABEL: store_i32_stride5_vf64:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm17
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm20
 ; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm18
 ; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm3
 ; AVX512BW-NEXT:    vmovdqa64 128(%rdx), %zmm29
-; AVX512BW-NEXT:    vmovdqa64 192(%rdx), %zmm15
+; AVX512BW-NEXT:    vmovdqa64 192(%rdx), %zmm23
 ; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm21
 ; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm25
 ; AVX512BW-NEXT:    vmovdqa64 128(%rcx), %zmm24
@@ -6745,197 +6744,196 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
 ; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm28, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,16,2,18,0,1,17,3,0,16,2,18,0,1,17,3]
-; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,16,2,18,0,1,17,3,0,16,2,18,0,1,17,3]
+; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [14,30,0,13,29,15,31,0,14,30,0,13,29,15,31,0]
 ; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm4
 ; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [0,13,29,15,31,0,14,30,0,13,29,15,31,0,14,30]
-; AVX512BW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm17, %zmm2
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [0,13,29,15,31,0,14,30,0,13,29,15,31,0,14,30]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm2
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm14, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [27,0,10,26,12,28,0,11,27,0,10,26,12,28,0,11]
 ; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm5
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm2, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [9,27,11,0,26,10,28,12,9,27,11,0,26,10,28,12]
-; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [9,27,11,0,26,10,28,12,9,27,11,0,26,10,28,12]
+; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm31
-; AVX512BW-NEXT:    vpermt2d %zmm15, %zmm14, %zmm31
+; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm10, %zmm31
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [6,22,8,24,0,7,23,9,6,22,8,24,0,7,23,9]
 ; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm22
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm13, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm27
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm13, %zmm27
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,3,19,5,21,0,4,20,0,3,19,5,21,0,4,20]
 ; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm30, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm28, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm30, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm28, %zmm23
 ; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm0
 ; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm4, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm20
-; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm14, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm13, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm22
+; AVX512BW-NEXT:    vpermt2d %zmm29, %zmm10, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm19
+; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm13, %zmm19
 ; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm0
 ; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm30, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2d %zmm24, %zmm28, %zmm29
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm24
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm14, %zmm24
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm10, %zmm24
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm6
 ; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm28, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [8,24,0,7,23,9,25,0,8,24,0,7,23,9,25,0]
 ; AVX512BW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
 ; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm13, %zmm26
-; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm30, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm16
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm30, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm16
 ; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm28, %zmm16
 ; AVX512BW-NEXT:    vpermi2d %zmm21, %zmm1, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm21, %zmm14
+; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm21, %zmm10
 ; AVX512BW-NEXT:    vpermi2d %zmm21, %zmm1, %zmm13
 ; AVX512BW-NEXT:    vpermt2d %zmm21, %zmm30, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [3,21,5,0,20,4,22,6,3,21,5,0,20,4,22,6]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [3,21,5,0,20,4,22,6,3,21,5,0,20,4,22,6]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm23, %zmm2, %zmm21
-; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm7, %zmm23
+; AVX512BW-NEXT:    vpermt2d %zmm20, %zmm8, %zmm21
+; AVX512BW-NEXT:    vpermt2d %zmm18, %zmm15, %zmm20
 ; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm18
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm17, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm11
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm5, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm18
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm14, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm12
 ; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm28, %zmm12
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm30
-; AVX512BW-NEXT:    vpermt2d %zmm19, %zmm2, %zmm30
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm7, %zmm19
+; AVX512BW-NEXT:    vpermt2d %zmm17, %zmm8, %zmm30
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm15, %zmm17
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm25
-; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm8
-; AVX512BW-NEXT:    vpermt2d %zmm1, %zmm7, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm15, %zmm7
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-NEXT:    vpermt2d %zmm0, %zmm8, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm7, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm17, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm5, %zmm10
-; AVX512BW-NEXT:    vpermt2d %zmm3, %zmm28, %zmm9
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm25, %zmm17
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm25, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2d %zmm1, %zmm25, %zmm28
-; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm15, %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm14, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm1, %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm28, %zmm9
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm25, %zmm14
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm25, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2d %zmm3, %zmm25, %zmm28
+; AVX512BW-NEXT:    vpermt2d %zmm25, %zmm8, %zmm3
 ; AVX512BW-NEXT:    movw $12684, %ax # imm = 0x318C
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm7 {%k1}
 ; AVX512BW-NEXT:    movw $25368, %ax # imm = 0x6318
 ; AVX512BW-NEXT:    kmovd %eax, %k2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k2}
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm31 {%k1}
-; AVX512BW-NEXT:    vmovdqa32 %zmm16, %zmm22 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm8 {%k2}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm31 {%k1}
+; AVX512BW-NEXT:    vmovdqa32 %zmm16, %zmm27 {%k2}
 ; AVX512BW-NEXT:    movw $6342, %ax # imm = 0x18C6
 ; AVX512BW-NEXT:    kmovd %eax, %k3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm21 {%k3}
-; AVX512BW-NEXT:    vmovdqa32 %zmm15, %zmm23 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm21 {%k3}
+; AVX512BW-NEXT:    vmovdqa32 %zmm23, %zmm20 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 192(%r8), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [28,1,2,3,4,29,6,7,8,9,30,11,12,13,14,31]
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm5, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [28,1,2,3,4,29,6,7,8,9,30,11,12,13,14,31]
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm23
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [0,25,2,3,4,5,26,7,8,9,10,27,12,13,14,15]
 ; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm15, %zmm31
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [0,1,22,3,4,5,6,23,8,9,10,11,24,13,14,15]
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm16, %zmm22
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm16, %zmm27
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [0,1,2,19,4,5,6,7,20,9,10,11,12,21,14,15]
 ; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm25, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,16,5,6,7,8,17,10,11,12,13,18,15]
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm3, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,16,5,6,7,8,17,10,11,12,13,18,15]
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm8, %zmm20
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm18 {%k2}
-; AVX512BW-NEXT:    vmovdqa32 %zmm11, %zmm20 {%k1}
-; AVX512BW-NEXT:    vmovdqa32 %zmm12, %zmm27 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm11, %zmm22 {%k1}
+; AVX512BW-NEXT:    vmovdqa32 %zmm12, %zmm19 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm30 {%k3}
-; AVX512BW-NEXT:    vmovdqa32 %zmm29, %zmm19 {%k1}
+; AVX512BW-NEXT:    vmovdqa32 %zmm29, %zmm17 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 128(%r8), %zmm2
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm5, %zmm18
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm15, %zmm20
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm16, %zmm27
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm1, %zmm18
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm15, %zmm22
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm16, %zmm19
 ; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm25, %zmm30
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm3, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm6 {%k2}
-; AVX512BW-NEXT:    vmovdqa32 %zmm10, %zmm24 {%k1}
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm8, %zmm17
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm5 {%k2}
+; AVX512BW-NEXT:    vmovdqa32 %zmm6, %zmm24 {%k1}
 ; AVX512BW-NEXT:    vmovdqa32 %zmm9, %zmm26 {%k2}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa32 %zmm2, %zmm4 {%k3}
 ; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm7, %zmm0 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm7
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm3, %zmm8
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm5, %zmm6
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm15, %zmm24
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm16, %zmm26
-; AVX512BW-NEXT:    vpermt2d %zmm7, %zmm25, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm3, %zmm17 {%k2}
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm5, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm3, %zmm14 {%k1}
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm15, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm6
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm8, %zmm7
+; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm8, %zmm0
+; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm1, %zmm5
+; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm15, %zmm24
+; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm16, %zmm26
+; AVX512BW-NEXT:    vpermt2d %zmm6, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm6, %zmm14 {%k2}
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm1, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm10 {%k1}
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm15, %zmm10
 ; AVX512BW-NEXT:    vmovdqa32 %zmm28, %zmm13 {%k2}
 ; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm16, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa32 %zmm3, %zmm1 {%k3}
-; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm25, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%r9)
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm3 {%k3}
+; AVX512BW-NEXT:    vpermt2d %zmm2, %zmm25, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 64(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm13, 128(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, 192(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 256(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, 192(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, 256(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, 320(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm4, 384(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, 448(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm24, 512(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 576(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, 640(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 576(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, 640(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm30, 704(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, 768(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, 832(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, 768(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, 832(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm18, 896(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, 960(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 960(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, 1024(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, 1088(%r9)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm31, 1152(%r9)
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovaps %zmm0, 1216(%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, 1216(%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%r9)
 ; AVX512BW-NEXT:    addq $712, %rsp # imm = 0x2C8
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index 299d74fdf5f17..f41efc79b7b50 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -8650,7 +8650,7 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-LABEL: store_i32_stride6_vf64:
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    subq $1160, %rsp # imm = 0x488
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdi), %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rdi), %zmm9
 ; AVX512F-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm5
 ; AVX512F-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm3
 ; AVX512F-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
@@ -8658,43 +8658,42 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm24
 ; AVX512F-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm29
 ; AVX512F-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm23
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %zmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %zmm15
 ; AVX512F-FAST-NEXT:    vmovdqa64 (%rcx), %zmm21
-; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
-; AVX512F-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm20, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
+; AVX512F-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, %zmm22
+; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm19, %zmm22
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
 ; AVX512F-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm16
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
 ; AVX512F-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm10, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17
 ; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
 ; AVX512F-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, %zmm31
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, %zmm25
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm1, %zmm31
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm8
-; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
-; AVX512F-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm11, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm27
-; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm1, %zmm25
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
+; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
 ; AVX512F-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm12, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm27
+; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
+; AVX512F-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm13, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
 ; AVX512F-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm30, %zmm25
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm30, %zmm9
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm7, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -8702,14 +8701,14 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm10, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm8, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm11, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm12, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm13, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm30, %zmm5
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
@@ -8720,96 +8719,94 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm29, %zmm10, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vpermt2d %zmm29, %zmm8, %zmm1
+; AVX512F-FAST-NEXT:    vpermt2d %zmm29, %zmm11, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, %zmm26
-; AVX512F-FAST-NEXT:    vpermt2d %zmm29, %zmm11, %zmm26
+; AVX512F-FAST-NEXT:    vpermt2d %zmm29, %zmm12, %zmm26
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, %zmm28
-; AVX512F-FAST-NEXT:    vpermt2d %zmm29, %zmm12, %zmm28
+; AVX512F-FAST-NEXT:    vpermt2d %zmm29, %zmm13, %zmm28
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm29, %zmm30, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm9, %zmm19
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, %zmm24
+; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm20, %zmm24
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm10
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm8
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm11
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm12
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm13
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm23, %zmm30, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
 ; AVX512F-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm23
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, %zmm23
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm29, %zmm23
-; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
-; AVX512F-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm7
-; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm8, %zmm7
-; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
-; AVX512F-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm2, %zmm5
+; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
+; AVX512F-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, %zmm18
+; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm3, %zmm18
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, %zmm7
+; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm4, %zmm7
+; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
+; AVX512F-FAST-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vpermt2d %zmm21, %zmm31, %zmm15
 ; AVX512F-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm21
 ; AVX512F-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm22
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm22
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm24
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm9, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm13
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm19, %zmm13
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm10
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm10
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm30
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm29, %zmm30
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm10
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm6
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm8, %zmm6
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm21
-; AVX512F-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm13
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm9
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm3, %zmm9
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm5
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm5
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm21
+; AVX512F-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm12
 ; AVX512F-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm15
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, %zmm12
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm9, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm29, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm11
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, %zmm9
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm8, %zmm9
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm13
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm19, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, %zmm11
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm29, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, %zmm8
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm3, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, %zmm6
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm6
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm12
 ; AVX512F-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm0
+; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm19
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm20
-; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm24
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm29
+; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm3
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm4
-; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm8
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm1
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm1
 ; AVX512F-FAST-NEXT:    movb $-110, %al
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k2
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k2}
 ; AVX512F-FAST-NEXT:    movb $36, %al
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, %zmm17 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm31, %zmm23 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, %zmm27 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, %zmm19 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, %zmm5 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, %zmm22 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm16
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm16
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm16, (%rsp) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm17
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm24, %zmm17
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm23
@@ -8817,135 +8814,135 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm27
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm16, %zmm19
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, %zmm17
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm5
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm16, %zmm22
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm16, %zmm15
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, %zmm31 {%k2}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, %zmm27 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, %zmm22 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, %zmm27 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, %zmm16 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k2}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm27
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm22
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm31
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm24, %zmm27
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm30
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm16
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, %zmm19
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm17, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, %zmm17
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm21
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm22
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm10, %zmm17
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm9, %zmm21
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, %zmm16 {%k2}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, %zmm14 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm28 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, %zmm26 {%k2}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm28 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm16
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm15
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm3
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm16
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm24, %zmm14
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm2
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm26
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm14, %zmm28
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm13
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k2}
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm10, %zmm28
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, %zmm13
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm9, %zmm12
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, %zmm5 {%k2}
 ; AVX512F-FAST-NEXT:    vmovdqa64 192(%r8), %zmm0
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k1}
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k1}
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, %zmm5 {%k1}
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm24, %zmm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm29
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqa64 (%r9), %zmm2
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k2}
+; AVX512F-FAST-NEXT:    vmovdqa64 (%r9), %zmm3
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu64 (%rsp), %zmm18 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm4, %zmm18
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm5, %zmm18
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, %zmm6 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm11, %zmm20
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm14, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm4, %zmm19
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm13, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm13
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm6, %zmm23
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm6, %zmm23
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm7, %zmm24
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm20
+; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm8, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm0, %zmm25
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm0, %zmm24
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm8, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 64(%r9), %zmm2
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm4, %zmm27
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm11, %zmm22
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm6, %zmm30
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm7, %zmm19
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm0, %zmm17
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm8, %zmm21
-; AVX512F-FAST-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm4, %zmm16
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm11, %zmm15
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm6, %zmm3
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm7, %zmm26
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm0, %zmm28
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm8, %zmm13
-; AVX512F-FAST-NEXT:    vmovdqa64 192(%r9), %zmm2
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm4, %zmm9
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm11, %zmm10
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm6, %zmm29
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm7, %zmm12
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm0, %zmm14
-; AVX512F-FAST-NEXT:    vpermt2d %zmm2, %zmm8, %zmm1
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm8, %zmm15
+; AVX512F-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm5, %zmm31
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm4, %zmm27
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm6, %zmm30
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm22
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm0, %zmm17
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm8, %zmm21
+; AVX512F-FAST-NEXT:    vmovdqa64 128(%r9), %zmm3
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm5, %zmm16
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm4, %zmm14
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm6, %zmm2
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm26
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm0, %zmm28
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm8, %zmm12
+; AVX512F-FAST-NEXT:    vmovdqa64 192(%r9), %zmm3
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm5, %zmm9
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm4, %zmm10
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm6, %zmm29
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm11
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm0, %zmm13
+; AVX512F-FAST-NEXT:    vpermt2d %zmm3, %zmm8, %zmm1
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 1472(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 1408(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, 1344(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, 1344(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, 1280(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, 1152(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, 1088(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, 1088(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm28, 1024(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, 960(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 896(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, 832(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 896(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 832(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, 768(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, 704(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, 640(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, 576(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, 576(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm30, 512(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, 448(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm27, 384(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, 256(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, 192(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm27, 448(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm31, 384(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, 320(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, 256(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, 192(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm23, 128(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, 64(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, 64(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, (%rax)
 ; AVX512F-FAST-NEXT:    addq $1160, %rsp # imm = 0x488
 ; AVX512F-FAST-NEXT:    vzeroupper
@@ -9219,7 +9216,7 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-LABEL: store_i32_stride6_vf64:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    subq $1160, %rsp # imm = 0x488
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm8
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm9
 ; AVX512BW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm5
 ; AVX512BW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
@@ -9227,43 +9224,42 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm24
 ; AVX512BW-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm29
 ; AVX512BW-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm23
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm4
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm15
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm21
-; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
-; AVX512BW-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm20, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [1,17,0,16,0,0,2,18,1,17,0,16,0,0,2,18]
+; AVX512BW-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm22
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm19, %zmm22
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,16,0,0,2,18,1,17,0,16,0,0,2,18,1,17]
 ; AVX512BW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm16
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
 ; AVX512BW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm10, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17
 ; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
 ; AVX512BW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm31
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm25
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm1, %zmm31
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm8
-; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
-; AVX512BW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm11, %zmm1
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm27
-; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm1, %zmm25
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
+; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [8,24,0,0,10,26,9,25,8,24,0,0,10,26,9,25]
 ; AVX512BW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm12, %zmm1
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm27
+; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
+; AVX512BW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm13, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
 ; AVX512BW-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm30, %zmm25
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm30, %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm7, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -9271,14 +9267,14 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm10, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm8, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm11, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm12, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm13, %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm30, %zmm5
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
@@ -9289,96 +9285,94 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm29, %zmm10, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm29, %zmm8, %zmm1
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm29, %zmm11, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm26
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm29, %zmm11, %zmm26
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm29, %zmm12, %zmm26
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm28
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm29, %zmm12, %zmm28
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm29, %zmm13, %zmm28
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm29, %zmm30, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm9, %zmm19
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm24
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm20, %zmm24
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm7
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm10
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm8
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm11
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm12
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm23, %zmm2, %zmm13
+; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm23, %zmm30, %zmm2
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
 ; AVX512BW-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm23
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm23
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm29, %zmm23
-; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
-; AVX512BW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm7
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm8, %zmm7
-; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
-; AVX512BW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm2, %zmm5
+; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [9,25,8,24,0,0,10,26,9,25,8,24,0,0,10,26]
+; AVX512BW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm18
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm3, %zmm18
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm7
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm4, %zmm7
+; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
+; AVX512BW-FAST-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm21, %zmm31, %zmm15
 ; AVX512BW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm21
 ; AVX512BW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm22
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm22
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm14
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm24
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm9, %zmm14
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm13
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm19, %zmm13
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm10
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm10
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm30
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm29, %zmm30
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm10
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm6
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm8, %zmm6
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm21
-; AVX512BW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm13
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm9
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm3, %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm5
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm5
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm21
+; AVX512BW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm12
 ; AVX512BW-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm15
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm12
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm9, %zmm12
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm29, %zmm3
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm11
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm9
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm8, %zmm9
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm13
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm19, %zmm14
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm11
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm29, %zmm2
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm8
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm3, %zmm8
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm6
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm6
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm12
 ; AVX512BW-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm0
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm19
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm20
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm24
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm29
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm3
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm4
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm1, %zmm8
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm1
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm1
 ; AVX512BW-FAST-NEXT:    movb $-110, %al
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k2}
 ; AVX512BW-FAST-NEXT:    movb $36, %al
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm17 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm23 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm27 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm19 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm5 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm22 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,16,u,6,7,8,9,17,u,12,13,14,15>
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm16
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm16, (%rsp) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm17
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm24, %zmm17
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm23
@@ -9386,135 +9380,135 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm27
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm16, %zmm19
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm17
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm5
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm16, %zmm22
+; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm16, %zmm15
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm31 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm27 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm22 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm27 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm16 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm27
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm22
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm31
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm24, %zmm27
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm30
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm16
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm19
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm17, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm14
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm17
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm21
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm22
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <26,u,2,3,4,5,27,u,8,9,10,11,28,u,14,15>
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm10, %zmm17
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm9, %zmm21
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm16 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm14 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm28 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm26 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm28 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm16
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm15
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm3
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm16
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm24, %zmm14
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm2
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm26
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm14, %zmm28
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm13
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k2}
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm10, %zmm28
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm13
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm9, %zmm12
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm5 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 192(%r8), %zmm0
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm2, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k1}
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm2
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k1}
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm7, %zmm5
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm5 {%k1}
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm24, %zmm5
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm18, %zmm29
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %zmm2
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm12
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %zmm3
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm25, %zmm5
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
 ; AVX512BW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm18 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm4, %zmm18
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm5, %zmm18
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm6 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm11, %zmm20
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm14, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm14
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm4, %zmm19
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm13, %zmm6
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm13
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm6, %zmm23
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm6, %zmm23
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm7, %zmm24
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm31, %zmm1
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm20
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm8, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm0, %zmm25
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm0, %zmm24
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm8, %zmm5
-; AVX512BW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm2
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm4, %zmm27
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm11, %zmm22
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm6, %zmm30
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm7, %zmm19
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm0, %zmm17
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm8, %zmm21
-; AVX512BW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm4, %zmm16
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm11, %zmm15
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm6, %zmm3
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm7, %zmm26
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm0, %zmm28
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm8, %zmm13
-; AVX512BW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm2
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm4, %zmm9
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm11, %zmm10
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm6, %zmm29
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm7, %zmm12
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm0, %zmm14
-; AVX512BW-FAST-NEXT:    vpermt2d %zmm2, %zmm8, %zmm1
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm8, %zmm15
+; AVX512BW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm5, %zmm31
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm4, %zmm27
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm6, %zmm30
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm22
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm0, %zmm17
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm8, %zmm21
+; AVX512BW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm3
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm5, %zmm16
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm4, %zmm14
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm6, %zmm2
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm26
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm0, %zmm28
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm8, %zmm12
+; AVX512BW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm3
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm5, %zmm9
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm4, %zmm10
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm6, %zmm29
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm11
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm0, %zmm13
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm3, %zmm8, %zmm1
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, 1472(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm14, 1408(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, 1344(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm11, 1344(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm29, 1280(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, 1152(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, 1088(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, 1088(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm28, 1024(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm26, 960(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, 896(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, 832(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, 896(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm14, 832(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm16, 768(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, 704(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm17, 640(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm19, 576(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm22, 576(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm30, 512(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm22, 448(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm27, 384(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm25, 256(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, 192(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm27, 448(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm31, 384(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, 320(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, 256(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm20, 192(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm23, 128(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm20, 64(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm19, 64(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm18, (%rax)
 ; AVX512BW-FAST-NEXT:    addq $1160, %rsp # imm = 0x488
 ; AVX512BW-FAST-NEXT:    vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
index 99da0be38c2b4..460d9a3629c07 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
@@ -5247,128 +5247,128 @@ define void @store_i64_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm10
 ; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm9
 ; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqa64 256(%rsi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 256(%rsi), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 128(%rsi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 192(%rsi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 128(%rsi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 192(%rsi), %zmm1
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <3,u,u,u,12,4,u,u>
 ; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm12
 ; AVX512F-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
 ; AVX512F-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm12
 ; AVX512F-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
 ; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm12
 ; AVX512F-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
 ; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm12
 ; AVX512F-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
 ; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm12
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12
 ; AVX512F-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
 ; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm21 = <0,8,u,u,u,1,9,u>
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm21, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <u,u,2,10,u,u,u,3>
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <u,5,13,u,u,u,6,14>
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
-; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm14, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [15,7,15,7,15,7,15,7]
+; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm11
 ; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm21, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm13, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm4, %zmm10
 ; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
-; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
 ; AVX512F-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
 ; AVX512F-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
 ; AVX512F-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512F-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512F-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512F-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm4, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 320(%rsi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
+; AVX512F-NEXT:    vmovdqa64 320(%rsi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm20, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm4, %zmm6
 ; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 384(%rsi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm20, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm4, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 448(%rsi), %zmm0
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm20
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm21
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm13
 ; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm14
 ; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm4, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm25
 ; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm0
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,3,11,u,u,u,4,12>
@@ -5710,128 +5710,128 @@ define void @store_i64_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm10
 ; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm9
 ; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqa64 256(%rsi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 256(%rsi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <3,u,u,u,12,4,u,u>
 ; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm12
 ; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm12
 ; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
 ; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm12
 ; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm12
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12
 ; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = <0,8,u,u,u,1,9,u>
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm21, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <u,u,2,10,u,u,u,3>
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <u,5,13,u,u,u,6,14>
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [15,7,15,7,15,7,15,7]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm21, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm10
 ; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
 ; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
 ; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
 ; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm7
 ; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 320(%rsi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 320(%rsi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm20, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm6
 ; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 384(%rsi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm20, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm21, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 448(%rsi), %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm20
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm21
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm13
 ; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm14
 ; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm25
 ; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,3,11,u,u,u,4,12>
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
index 0de7beea9398a..2a1f87fd1e1e3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
@@ -752,77 +752,73 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r10), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r10), %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [15,7,15,7,15,7,15,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,0,8,0,8,0,8]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,0,14,6,5,0,14,6]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [9,1,9,1,9,1,9,1]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [4,12,0,5,4,12,0,5]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm5, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
 ; AVX512F-ONLY-SLOW-NEXT:    movb $24, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $96, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm7, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm7 = xmm7[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [14,1,2,3,4,5,6,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm9, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,8,0,1,0,8,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    movb $12, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm6 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%r10), %zmm8, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm5 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,0,8,0,8,0,8]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%r10), %zmm9, %zmm9
 ; AVX512F-ONLY-SLOW-NEXT:    movb $112, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,13,6,7,0,13,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm9, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm8[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm5 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm8, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,13,6,7,0,13,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm9, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [6,14,6,14]
+; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm11[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $-61, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,3,11,3,11,3,11,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [2,10,0,3,2,10,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [11,3,11,3,11,3,11,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [2,10,0,3,2,10,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm11
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm7[0],ymm12[2],ymm7[2]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $28, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm13[2,3,2,3],zmm2[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm13[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [1,0,10,2,1,0,10,2]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm13
@@ -831,42 +827,50 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-ONLY-SLOW-NEXT:    movb $6, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm14, %zmm0, %zmm13 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,9,0,3,4,9,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm11, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [9,1,9,1,9,1,9,1]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm14, %zmm15
 ; AVX512F-ONLY-SLOW-NEXT:    movb $56, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm13 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [13,5,13,5,13,5,13,5]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [3,0,12,4,3,0,12,4]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm13 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,12,0,5,4,12,0,5]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm11, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,1,12,7,0,1,12,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm10, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,1,12,7,0,1,12,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm14, %zmm7
 ; AVX512F-ONLY-SLOW-NEXT:    movb $120, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm7 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [3,0,12,4,3,0,12,4]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
 ; AVX512F-ONLY-SLOW-NEXT:    movb $48, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm14 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm7[1],ymm12[3],ymm7[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm11[1],ymm12[3],ymm11[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $14, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm14 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 256(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm6 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 256(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 192(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 128(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 320(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 384(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 192(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
@@ -874,123 +878,127 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-ONLY-FAST:       # %bb.0:
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r10), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [15,7,15,7,15,7,15,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,0,8,0,8,0,8]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r10), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [15,7,15,7,15,7,15,7]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,0,14,6,5,0,14,6]
-; AVX512F-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [9,1,9,1,9,1,9,1]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,0,5,4,12,0,5]
-; AVX512F-ONLY-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm5, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
-; AVX512F-ONLY-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm9
 ; AVX512F-ONLY-FAST-NEXT:    movb $24, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $96, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm7, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
-; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm7 = xmm7[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [14,1,2,3,4,5,6,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,8,0,1,0,8,0,1]
+; AVX512F-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512F-ONLY-FAST-NEXT:    movb $12, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm6 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, (%r10), %zmm8, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm8 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,0,8,0,8,0,8]
+; AVX512F-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, (%r10), %zmm9, %zmm9
 ; AVX512F-ONLY-FAST-NEXT:    movb $112, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,13,6,7,0,13,6,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm8[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,13,6,7,0,13,6,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [6,14,6,14]
+; AVX512F-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm11[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $-61, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [3,0,12,4,3,0,12,4]
-; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm4, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [3,0,12,4,3,0,12,4]
+; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm10
 ; AVX512F-ONLY-FAST-NEXT:    movb $48, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k2} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k2} = zmm2[0],zmm4[0],zmm2[2],zmm4[2],zmm2[4],zmm4[4],zmm2[6],zmm4[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm11
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <1,3,7,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %ymm9, %ymm12, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %ymm11, %ymm12, %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $14, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm7 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm10 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [11,3,11,3,11,3,11,3]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm13
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [2,10,0,3,2,10,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm12[0],ymm9[0],ymm12[2],ymm9[2]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
 ; AVX512F-ONLY-FAST-NEXT:    movb $28, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm9[2,3,2,3],zmm3[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [1,0,10,2,1,0,10,2]
-; AVX512F-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm11[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [1,0,10,2,1,0,10,2]
+; AVX512F-ONLY-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm4, %zmm11
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm12
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = mem[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $6, %cl
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %ecx, %k2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm9 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [4,9,0,3,4,9,0,3]
-; AVX512F-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm11 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [9,1,9,1,9,1,9,1]
+; AVX512F-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm13
 ; AVX512F-ONLY-FAST-NEXT:    movb $56, %cl
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %ecx, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm9 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm11, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm2, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    movb $120, %cl
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 384(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 384(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
@@ -1000,119 +1008,123 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r10), %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r10), %zmm6
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [5,0,14,6,5,0,14,6]
 ; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm9, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,13,6,7,0,13,6,7]
-; AVX512DQ-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm8, %zmm2
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,13,6,7,0,13,6,7]
+; AVX512DQ-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm2, %zmm5
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [6,14,6,14,6,14,6,14]
 ; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm2
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
+; AVX512DQ-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm9[0,1,2,3],zmm2[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $-61, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,3,11,3,11,3,11,3]
-; AVX512DQ-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,10,0,3,2,10,0,3]
-; AVX512DQ-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [2,10,0,3,2,10,0,3]
+; AVX512DQ-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm5
 ; AVX512DQ-SLOW-NEXT:    movb $96, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm8
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm5 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm10
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
 ; AVX512DQ-SLOW-NEXT:    movb $28, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm11[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm11[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [15,7,15,7,15,7,15,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm11
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [15,7,15,7,15,7,15,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm10, %zmm9, %zmm12
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm10, %zmm9, %zmm13
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,12,0,5,4,12,0,5]
-; AVX512DQ-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm10, %zmm9, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm11, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm11
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [15,7,15,7]
-; AVX512DQ-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm12
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
 ; AVX512DQ-SLOW-NEXT:    movb $24, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [14,1,2,3,4,5,6,15]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm10, %zmm9
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,0,10,2,1,0,10,2]
-; AVX512DQ-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm11
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = mem[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm13 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [14,1,2,3,4,5,6,15]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm13, %zmm11
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [1,0,10,2,1,0,10,2]
+; AVX512DQ-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm12
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm13
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = mem[0,1,2,3],ymm13[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $6, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm10 {%k2}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,9,0,3,4,9,0,3]
-; AVX512DQ-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm12, %zmm11
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm12 {%k2}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm13
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,9,0,3,4,9,0,3]
+; AVX512DQ-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm13, %zmm14
 ; AVX512DQ-SLOW-NEXT:    movb $56, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k2}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,8,0,1,0,8,0,1]
-; AVX512DQ-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm12
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm12 = xmm12[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm12 {%k2}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [0,8,0,1,0,8,0,1]
+; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm14
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm14 = xmm14[0],mem[0]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512DQ-SLOW-NEXT:    movb $12, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm11 {%k2}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm14, %zmm0, %zmm13 {%k2}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [0,8,0,8,0,8,0,8]
+; AVX512DQ-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
 ; AVX512DQ-SLOW-NEXT:    movb $112, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k2
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, (%r10), %zmm13, %zmm11 {%k2}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [13,5,13,5,13,5,13,5]
-; AVX512DQ-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [3,0,12,4,3,0,12,4]
-; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm12, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k1}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,1,12,7,0,1,12,7]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm14, %zmm4
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, (%r10), %zmm14, %zmm13 {%k2}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm14
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm15
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,12,0,5,4,12,0,5]
+; AVX512DQ-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,1,12,7,0,1,12,7]
+; AVX512DQ-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm14, %zmm7
 ; AVX512DQ-SLOW-NEXT:    movb $120, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm7 {%k1}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [3,0,12,4,3,0,12,4]
+; AVX512DQ-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm6
 ; AVX512DQ-SLOW-NEXT:    movb $48, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm13 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm8[1],ymm7[1],ymm8[3],ymm7[3]
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm10[1],ymm9[1],ymm10[3],ymm9[3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $14, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm13 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 192(%rax)
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm6 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 256(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, (%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 64(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 384(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 192(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
@@ -1121,122 +1133,126 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-FAST:       # %bb.0:
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r10), %zmm3
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,0,14,6,5,0,14,6]
-; AVX512DQ-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm9, %zmm5
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,13,6,7,0,13,6,7]
-; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm6
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm5
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r10), %zmm0
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [5,0,14,6,5,0,14,6]
+; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm4
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,13,6,7,0,13,6,7]
+; AVX512DQ-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm4, %zmm8
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm4
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
+; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm9
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm9[0,1,2,3],zmm4[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    movb $-61, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [3,0,12,4,3,0,12,4]
-; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm4, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm4 {%k1}
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [3,0,12,4,3,0,12,4]
+; AVX512DQ-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm7, %zmm8
 ; AVX512DQ-FAST-NEXT:    movb $48, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k1
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm2[0],zmm5[0],zmm2[2],zmm5[2],zmm2[4],zmm5[4],zmm2[6],zmm5[6]
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,3,7,u>
-; AVX512DQ-FAST-NEXT:    vpermi2q %ymm10, %ymm11, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <1,3,7,u>
+; AVX512DQ-FAST-NEXT:    vpermi2q %ymm10, %ymm11, %ymm9
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    movb $14, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm6 {%k1}
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm8 {%k1}
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [11,3,11,3,11,3,11,3]
 ; AVX512DQ-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [2,10,0,3,2,10,0,3]
-; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm12
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [2,10,0,3,2,10,0,3]
+; AVX512DQ-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm9
 ; AVX512DQ-FAST-NEXT:    movb $96, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm7 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm9 {%k1}
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
 ; AVX512DQ-FAST-NEXT:    movb $28, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm10[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm10[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [15,7,15,7,15,7,15,7]
 ; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm10
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [15,7,15,7,15,7,15,7]
 ; AVX512DQ-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm9, %zmm8, %zmm11
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm9, %zmm8, %zmm12
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [4,12,0,5,4,12,0,5]
-; AVX512DQ-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm9, %zmm8, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm10
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [15,7,15,7]
-; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm12
 ; AVX512DQ-FAST-NEXT:    movb $24, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [14,1,2,3,4,5,6,15]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm8
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [1,0,10,2,1,0,10,2]
-; AVX512DQ-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = mem[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm12 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [14,1,2,3,4,5,6,15]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm10
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [1,0,10,2,1,0,10,2]
+; AVX512DQ-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm5, %zmm11
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = mem[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    movb $6, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm9 {%k2}
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [4,9,0,3,4,9,0,3]
-; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm11, %zmm10
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm11 {%k2}
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm12
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [4,9,0,3,4,9,0,3]
+; AVX512DQ-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm13
 ; AVX512DQ-FAST-NEXT:    movb $56, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,8,0,1,0,8,0,1]
-; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm11
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm11 = xmm11[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11 {%k2}
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,8,0,1,0,8,0,1]
+; AVX512DQ-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm13
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm13 = xmm13[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX512DQ-FAST-NEXT:    movb $12, %cl
 ; AVX512DQ-FAST-NEXT:    kmovw %ecx, %k2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm10 {%k2}
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm12 {%k2}
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [0,8,0,8,0,8,0,8]
+; AVX512DQ-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm13
 ; AVX512DQ-FAST-NEXT:    movb $112, %cl
 ; AVX512DQ-FAST-NEXT:    kmovw %ecx, %k2
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, (%r10), %zmm12, %zmm10 {%k2}
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [13,5,13,5,13,5,13,5]
-; AVX512DQ-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm11, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, (%r10), %zmm13, %zmm12 {%k2}
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm13
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm6 {%k1}
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
+; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm2
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
 ; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm13, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm2, %zmm1
 ; AVX512DQ-FAST-NEXT:    movb $120, %cl
 ; AVX512DQ-FAST-NEXT:    kmovw %ecx, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 384(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 128(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
@@ -1246,77 +1262,73 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r10), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r10), %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [15,7,15,7,15,7,15,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,0,8,0,8,0,8]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,0,14,6,5,0,14,6]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [9,1,9,1,9,1,9,1]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [4,12,0,5,4,12,0,5]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm5, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $24, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $96, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm7, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm7 = xmm7[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [14,1,2,3,4,5,6,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm9, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,8,0,1,0,8,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $12, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm6 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%r10), %zmm8, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm5 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,0,8,0,8,0,8]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%r10), %zmm9, %zmm9
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $112, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,13,6,7,0,13,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm9, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm8[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm5 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm8, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,13,6,7,0,13,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm9, %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [6,14,6,14]
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm11[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $-61, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,3,11,3,11,3,11,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [2,10,0,3,2,10,0,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [11,3,11,3,11,3,11,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [2,10,0,3,2,10,0,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm11
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm7[0],ymm12[2],ymm7[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $28, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm13[2,3,2,3],zmm2[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm13[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [1,0,10,2,1,0,10,2]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm13
@@ -1325,42 +1337,50 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $6, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm14, %zmm0, %zmm13 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,9,0,3,4,9,0,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm11, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [9,1,9,1,9,1,9,1]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm14, %zmm15
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $56, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm13 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [13,5,13,5,13,5,13,5]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [3,0,12,4,3,0,12,4]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm13 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,12,0,5,4,12,0,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm11, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,1,12,7,0,1,12,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm10, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,1,12,7,0,1,12,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm14, %zmm7
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $120, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm7 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [3,0,12,4,3,0,12,4]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $48, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm14 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm7[1],ymm12[3],ymm7[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm11[1],ymm12[3],ymm11[3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $14, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm14 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 256(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm6 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 256(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 128(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 320(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 384(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 192(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 128(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 320(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 384(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 192(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512BW-ONLY-SLOW-NEXT:    retq
 ;
@@ -1368,123 +1388,127 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-FAST:       # %bb.0:
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r10), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [15,7,15,7,15,7,15,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,0,8,0,8,0,8]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm6
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r10), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [15,7,15,7,15,7,15,7]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,0,14,6,5,0,14,6]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm7, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [9,1,9,1,9,1,9,1]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,0,5,4,12,0,5]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm5, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm9
 ; AVX512BW-ONLY-FAST-NEXT:    movb $24, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $96, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm7, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm7 = xmm7[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [14,1,2,3,4,5,6,15]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,8,0,1,0,8,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512BW-ONLY-FAST-NEXT:    movb $12, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm6 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, (%r10), %zmm8, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm8 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [0,8,0,8,0,8,0,8]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, (%r10), %zmm9, %zmm9
 ; AVX512BW-ONLY-FAST-NEXT:    movb $112, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,13,6,7,0,13,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm8[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,13,6,7,0,13,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [6,14,6,14]
+; AVX512BW-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm11[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $-61, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [3,0,12,4,3,0,12,4]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm4, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [3,0,12,4,3,0,12,4]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm10
 ; AVX512BW-ONLY-FAST-NEXT:    movb $48, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k2} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm9
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k2} = zmm2[0],zmm4[0],zmm2[2],zmm4[2],zmm2[4],zmm4[4],zmm2[6],zmm4[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm11
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm12
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <1,3,7,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %ymm9, %ymm12, %ymm13
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %ymm11, %ymm12, %ymm13
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $14, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm7 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm10 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [11,3,11,3,11,3,11,3]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm13
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [2,10,0,3,2,10,0,3]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm12[0],ymm9[0],ymm12[2],ymm9[2]
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $28, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm9[2,3,2,3],zmm3[2,3,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [1,0,10,2,1,0,10,2]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm11[2,3,2,3],zmm0[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [1,0,10,2,1,0,10,2]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm4, %zmm11
 ; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm12
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = mem[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $6, %cl
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm9 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [4,9,0,3,4,9,0,3]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm11 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [9,1,9,1,9,1,9,1]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm13
 ; AVX512BW-ONLY-FAST-NEXT:    movb $56, %cl
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm9 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm11, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm2, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    movb $120, %cl
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 64(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 192(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 320(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 384(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 192(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 320(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 384(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512BW-ONLY-FAST-NEXT:    retq
 ;
@@ -1494,119 +1518,123 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r10), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r10), %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [5,0,14,6,5,0,14,6]
 ; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm9, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,13,6,7,0,13,6,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm8, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,13,6,7,0,13,6,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm2, %zmm5
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [6,14,6,14,6,14,6,14]
 ; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
+; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm9[0,1,2,3],zmm2[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $-61, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,3,11,3,11,3,11,3]
-; AVX512DQBW-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,10,0,3,2,10,0,3]
-; AVX512DQBW-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [2,10,0,3,2,10,0,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm5
 ; AVX512DQBW-SLOW-NEXT:    movb $96, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %ymm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r8), %ymm8
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm5 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %ymm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r8), %ymm10
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
 ; AVX512DQBW-SLOW-NEXT:    movb $28, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm11[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm11[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [15,7,15,7,15,7,15,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [15,7,15,7,15,7,15,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm10, %zmm9, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm10, %zmm9, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,12,0,5,4,12,0,5]
-; AVX512DQBW-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm10, %zmm9, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm11, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [15,7,15,7]
-; AVX512DQBW-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm10
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
 ; AVX512DQBW-SLOW-NEXT:    movb $24, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [14,1,2,3,4,5,6,15]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm10, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,0,10,2,1,0,10,2]
-; AVX512DQBW-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm11
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = mem[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm13 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm13 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [14,1,2,3,4,5,6,15]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm13, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [1,0,10,2,1,0,10,2]
+; AVX512DQBW-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm13
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = mem[0,1,2,3],ymm13[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $6, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm10 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,9,0,3,4,9,0,3]
-; AVX512DQBW-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm12, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm12 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,9,0,3,4,9,0,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm13, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    movb $56, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,8,0,1,0,8,0,1]
-; AVX512DQBW-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm12
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm12 = xmm12[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm12 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [0,8,0,1,0,8,0,1]
+; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm14
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm14 = xmm14[0],mem[0]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512DQBW-SLOW-NEXT:    movb $12, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm11 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm14, %zmm0, %zmm13 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [0,8,0,8,0,8,0,8]
+; AVX512DQBW-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    movb $112, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, (%r10), %zmm13, %zmm11 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [13,5,13,5,13,5,13,5]
-; AVX512DQBW-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [3,0,12,4,3,0,12,4]
-; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm12, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,1,12,7,0,1,12,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm14, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, (%r10), %zmm14, %zmm13 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [4,12,0,5,4,12,0,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,1,12,7,0,1,12,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm14, %zmm7
 ; AVX512DQBW-SLOW-NEXT:    movb $120, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm7 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [3,0,12,4,3,0,12,4]
+; AVX512DQBW-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    movb $48, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm13 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm8[1],ymm7[1],ymm8[3],ymm7[3]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm10[1],ymm9[1],ymm10[3],ymm9[3]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $14, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm13 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, 384(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 128(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, 192(%rax)
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm6 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 256(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, (%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 64(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 384(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 192(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vzeroupper
 ; AVX512DQBW-SLOW-NEXT:    retq
@@ -1615,122 +1643,126 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-FAST:       # %bb.0:
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r10), %zmm3
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,0,14,6,5,0,14,6]
-; AVX512DQBW-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm9, %zmm5
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,13,6,7,0,13,6,7]
-; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm6
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm5
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm7
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r10), %zmm0
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [5,0,14,6,5,0,14,6]
+; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm4
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,13,6,7,0,13,6,7]
+; AVX512DQBW-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm4, %zmm8
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm4
+; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [6,14,6,14]
+; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm9
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm9[0,1,2,3],zmm4[4,5,6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $-61, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [3,0,12,4,3,0,12,4]
-; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm4, %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm4 {%k1}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [3,0,12,4,3,0,12,4]
+; AVX512DQBW-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm7, %zmm8
 ; AVX512DQBW-FAST-NEXT:    movb $48, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k1
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm2[0],zmm5[0],zmm2[2],zmm5[2],zmm2[4],zmm5[4],zmm2[6],zmm5[6]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa (%r9), %ymm10
 ; AVX512DQBW-FAST-NEXT:    vmovdqa (%r8), %ymm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,3,7,u>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %ymm10, %ymm11, %ymm7
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <1,3,7,u>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %ymm10, %ymm11, %ymm9
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $14, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k1
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm6 {%k1}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm8 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [11,3,11,3,11,3,11,3]
 ; AVX512DQBW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [2,10,0,3,2,10,0,3]
-; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm7
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm12
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [2,10,0,3,2,10,0,3]
+; AVX512DQBW-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm9
 ; AVX512DQBW-FAST-NEXT:    movb $96, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm7 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm9 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
 ; AVX512DQBW-FAST-NEXT:    movb $28, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm10[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm10[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [15,7,15,7,15,7,15,7]
 ; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm10
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [15,7,15,7,15,7,15,7]
 ; AVX512DQBW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm9, %zmm8, %zmm11
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm9, %zmm8, %zmm12
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [4,12,0,5,4,12,0,5]
-; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm9, %zmm8, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm10
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [15,7,15,7]
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm11
+; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm12
 ; AVX512DQBW-FAST-NEXT:    movb $24, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [14,1,2,3,4,5,6,15]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm8
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [1,0,10,2,1,0,10,2]
-; AVX512DQBW-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm10
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = mem[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm12 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [14,1,2,3,4,5,6,15]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm10
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [1,0,10,2,1,0,10,2]
+; AVX512DQBW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm5, %zmm11
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm12
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = mem[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $6, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm9 {%k2}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [4,9,0,3,4,9,0,3]
-; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm11, %zmm10
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm11 {%k2}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm12
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [4,9,0,3,4,9,0,3]
+; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm13
 ; AVX512DQBW-FAST-NEXT:    movb $56, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,8,0,1,0,8,0,1]
-; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %xmm11
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm11 = xmm11[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11 {%k2}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,8,0,1,0,8,0,1]
+; AVX512DQBW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %xmm13
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm13 = xmm13[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX512DQBW-FAST-NEXT:    movb $12, %cl
 ; AVX512DQBW-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm10 {%k2}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm12 {%k2}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [0,8,0,8,0,8,0,8]
+; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm13
 ; AVX512DQBW-FAST-NEXT:    movb $112, %cl
 ; AVX512DQBW-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, (%r10), %zmm12, %zmm10 {%k2}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [13,5,13,5,13,5,13,5]
-; AVX512DQBW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm11, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, (%r10), %zmm13, %zmm12 {%k2}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm7, %zmm6, %zmm13
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm6 {%k1}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
+; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
 ; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm13, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm2, %zmm1
 ; AVX512DQBW-FAST-NEXT:    movb $120, %cl
 ; AVX512DQBW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, 384(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 128(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 192(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, (%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, 384(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, 128(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512DQBW-FAST-NEXT:    vzeroupper
 ; AVX512DQBW-FAST-NEXT:    retq
   %in.vec0 = load <8 x i64>, ptr %in.vecptr0, align 64
@@ -2520,7 +2552,6 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm11
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm12
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm12
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [6,14,6,14]
@@ -2556,16 +2587,14 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [15,7,15,7,15,7,15,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm17, %zmm26
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm17, %zmm19
 ; AVX512F-ONLY-SLOW-NEXT:    movb $24, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm16 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = <u,1,2,3,4,15,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm16, %zmm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,1,2,3,4,15,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm16, %zmm26
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm11
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [0,1,12,7,0,1,12,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
@@ -2578,20 +2607,22 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [9,1,9,1,9,1,9,1]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm14, %zmm25, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [9,1,9,1,9,1,9,1]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm14, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm14, %zmm25, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm14, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm28, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm27, %zmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm28, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm27, %zmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,1,2,3,4,5,15,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm24, %zmm27, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <13,u,2,3,4,5,6,14>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm24, %zmm23, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm24, %zmm26, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <13,u,2,3,4,5,6,14>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm24, %zmm23, %zmm26
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %ymm24
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm24[0],ymm15[0],ymm24[2],ymm15[2]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $28, %sil
@@ -2599,17 +2630,17 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k3} = zmm23[2,3,2,3],zmm30[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [6,13,14,7,6,13,14,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm26, %zmm22, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm22, %zmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,13,6,7,0,13,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm1, %zmm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm29, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm31
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm26[0],ymm15[0],ymm26[2],ymm15[2]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k3} = zmm23[2,3,2,3],zmm22[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [4,9,0,3,4,9,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
@@ -2620,100 +2651,101 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [14,1,2,3,4,5,6,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm30, %zmm25, %zmm22
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,13,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm30, %zmm1, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [3,0,12,4,3,0,12,4]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm25, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm12, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm30, %zmm26, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [3,0,12,4,3,0,12,4]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm26, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm12, %zmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [1,0,10,2,1,0,10,2]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm29
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [1,0,10,2,1,0,10,2]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm8
 ; AVX512F-ONLY-SLOW-NEXT:    movb $48, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm6[0],zmm7[0],zmm6[2],zmm7[2],zmm6[4],zmm7[4],zmm6[6],zmm7[6]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm26, %zmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm30
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k3} = zmm4[0],zmm5[0],zmm4[2],zmm5[2],zmm4[4],zmm5[4],zmm4[6],zmm5[6]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    movb $12, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm8 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm9 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm21, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    movb $112, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k4}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm12 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm20, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm6 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $120, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm11 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = mem[0,1,2,3],ymm4[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $6, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm9 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm8 {%k4}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $56, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm8 {%k5}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm10 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $-31, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm10 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm7[0,1,2,3],zmm31[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $-61, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm29 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm13 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = mem[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm31 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm31 {%k5}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm24, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = ymm24[1],mem[1],ymm24[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm4 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm26 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm13 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm30 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm30 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm24[1],ymm15[1],ymm24[3],ymm15[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $14, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm3 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm26[1],ymm15[1],ymm26[3],ymm15[3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 64(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 64(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 128(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 320(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 512(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 512(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 576(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 704(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 448(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 768(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 832(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
@@ -2730,35 +2762,35 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm17
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm31
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [6,14,6,14,6,14,6,14]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm23
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm21 = [6,14,6,14]
 ; AVX512F-ONLY-FAST-NEXT:    # ymm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm11
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm19
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,8,0,1,0,8,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %xmm16
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm13 = xmm13[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm14 = xmm14[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512F-ONLY-FAST-NEXT:    movb $12, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [0,8,0,8,0,8,0,8]
-; AVX512F-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm14, %zmm0, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [0,8,0,8,0,8,0,8]
+; AVX512F-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm13, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm14, %zmm22
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm22, %zmm22
 ; AVX512F-ONLY-FAST-NEXT:    movb $112, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
@@ -2767,113 +2799,115 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm16 = xmm16[0],mem[0]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm16, %zmm0, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm30, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm13, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [13,5,13,5,13,5,13,5]
-; AVX512F-ONLY-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm24, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm24, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm31, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm14, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm26, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm25, %zmm27
 ; AVX512F-ONLY-FAST-NEXT:    movb $96, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm26 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm27 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [4,12,0,5,4,12,0,5]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm16, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,1,12,7,0,1,12,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm27, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    movb $120, %sil
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [15,7,15,7,15,7,15,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm23, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [6,13,14,7,6,13,14,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm16, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,1,12,7,0,1,12,7]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm14, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm23, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm25 = [15,7,15,7]
-; AVX512F-ONLY-FAST-NEXT:    # ymm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    movb $120, %sil
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm15, %zmm11, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [6,13,14,7,6,13,14,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm13, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm6, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm24 = [15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # ymm24 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm24, %zmm22
 ; AVX512F-ONLY-FAST-NEXT:    movb $24, %dil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %edi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm22 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    movb $-31, %dil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %edi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm22 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [5,0,14,6,5,0,14,6]
-; AVX512F-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm15, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [0,13,6,7,0,13,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [5,0,14,6,5,0,14,6]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm28, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm15, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,13,6,7,0,13,6,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm29, %zmm30
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm17, %zmm1, %zmm20
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm21
 ; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm21[0,1,2,3],zmm20[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $-61, %dil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %edi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm20 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm13 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [3,0,12,4,3,0,12,4]
-; AVX512F-ONLY-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm14 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [3,0,12,4,3,0,12,4]
+; AVX512F-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm26, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm27, %zmm21
 ; AVX512F-ONLY-FAST-NEXT:    movb $48, %sil
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm24, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm24 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm17, %zmm1, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm25 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm15, %zmm11, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm27, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm16 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm28, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm16 {%k3}
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm21 {%k3} = zmm4[0],zmm5[0],zmm4[2],zmm5[2],zmm4[4],zmm5[4],zmm4[6],zmm5[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %ymm26
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,3,7,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm27, %ymm6, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm26, %ymm6, %ymm8
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $14, %sil
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm17, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm26 {%k3} = zmm12[0],zmm0[0],zmm12[2],zmm0[2],zmm12[4],zmm0[4],zmm12[6],zmm0[6]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm17, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k3} = zmm12[0],zmm0[0],zmm12[2],zmm0[2],zmm12[4],zmm0[4],zmm12[6],zmm0[6]
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm21 {%k3}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %ymm24, %ymm8, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %ymm25, %ymm8, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm26 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm27[0],ymm28[2],ymm27[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm19[0,1,2,3],zmm18[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [11,3,11,3,11,3,11,3]
-; AVX512F-ONLY-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [2,10,0,3,2,10,0,3]
-; AVX512F-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm27, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm27 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm26[0],ymm28[2],ymm26[2]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm18[0,1,2,3],zmm23[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [11,3,11,3,11,3,11,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [2,10,0,3,2,10,0,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm26, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm27, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm17 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm26, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm23, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm17 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $28, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm6[2,3,2,3],zmm31[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm6[2,3,2,3],zmm19[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,0,10,2,1,0,10,2]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm6, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm8[0],ymm24[0],ymm8[2],ymm24[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm6[2,3,2,3],zmm14[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm8[0],ymm25[0],ymm8[2],ymm25[2]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm6[2,3,2,3],zmm13[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = mem[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $6, %al
@@ -2882,46 +2916,48 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [9,1,9,1,9,1,9,1]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm6, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm6, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,9,0,3,4,9,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm6, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm6, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm6, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm6, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = mem[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm0 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $64, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm18 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm18 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $56, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm12 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm23, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm25, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm24, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <u,1,2,3,4,15,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm30, %zmm4, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm4, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <u,1,2,3,4,5,15,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm5, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <13,u,2,3,4,5,6,14>
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm18, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [14,1,2,3,4,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm4, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm19, %zmm4, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,13,2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm5, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm19, %zmm5, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 192(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 256(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 320(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 384(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 512(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 576(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 640(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 448(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 768(%rax)
@@ -2934,7 +2970,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm5
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm8
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm9
@@ -2943,19 +2979,18 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm11
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm12
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm12
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm16 = [15,7,15,7]
-; AVX512DQ-SLOW-NEXT:    # ymm16 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [6,14,6,14]
+; AVX512DQ-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm13
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm16, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm15
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm14
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm14
 ; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm14[0,1,2,3],zmm12[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $64, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k1
@@ -2965,173 +3000,174 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm25
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rax), %zmm22
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm28
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [0,8,0,8,0,8,0,8]
+; AVX512DQ-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm17, %zmm21
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [13,5,13,5,13,5,13,5]
-; AVX512DQ-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm18, %zmm21
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-SLOW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    movb $96, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k1
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [4,12,0,5,4,12,0,5]
 ; AVX512DQ-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm24, %zmm11, %zmm17
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
-; AVX512DQ-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm18, %zmm26
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm24, %zmm11, %zmm18
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm18, %zmm19
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm17, %zmm19
 ; AVX512DQ-SLOW-NEXT:    movb $24, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm16 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = <u,1,2,3,4,15,u,u>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm16, %zmm27
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,1,2,3,4,15,u,u>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm15, %zmm26
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm12, %zmm11
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,1,12,7,0,1,12,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm30, %zmm11
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [11,3,11,3,11,3,11,3]
 ; AVX512DQ-SLOW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm31, %zmm0
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = [2,10,0,3,2,10,0,3]
-; AVX512DQ-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm18, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [9,1,9,1,9,1,9,1]
-; AVX512DQ-SLOW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
-; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm14, %zmm25, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm31, %zmm13
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [2,10,0,3,2,10,0,3]
+; AVX512DQ-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm15
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm15
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm15 {%k1}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm25, %zmm14, %zmm0
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [5,0,14,6,5,0,14,6]
+; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm14, %zmm25, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm25, %zmm14, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm29, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm29, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm27, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %ymm13
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm27, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,1,2,3,4,5,15,u>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm24, %zmm27, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <13,u,2,3,4,5,6,14>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm24, %zmm23, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm24, %zmm26, %zmm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <13,u,2,3,4,5,6,14>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm24, %zmm23, %zmm26
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %ymm23
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm23[0],ymm5[0],ymm23[2],ymm5[2]
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm23[0],ymm13[0],ymm23[2],ymm13[2]
 ; AVX512DQ-SLOW-NEXT:    movb $28, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k3
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k3} = zmm24[2,3,2,3],zmm28[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k3} = zmm24[2,3,2,3],zmm28[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [6,13,14,7,6,13,14,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm26, %zmm22, %zmm29
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm22, %zmm29
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,13,6,7,0,13,6,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm5
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm22, %zmm1, %zmm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %ymm26
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm13, %zmm4, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm18 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm26[0],ymm5[0],ymm26[2],ymm5[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k3} = zmm24[2,3,2,3],zmm22[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm1
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm31
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k3} = zmm24[2,3,2,3],zmm22[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [4,9,0,3,4,9,0,3]
 ; AVX512DQ-SLOW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm24, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm31
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm31
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm24, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [14,1,2,3,4,5,6,15]
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm28, %zmm25, %zmm22
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [0,13,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm28, %zmm1, %zmm24
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm28, %zmm26, %zmm24
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [1,0,10,2,1,0,10,2]
 ; AVX512DQ-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm25
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [3,0,12,4,3,0,12,4]
-; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm2
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [3,0,12,4,3,0,12,4]
+; AVX512DQ-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm28, %zmm9
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm26
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm25, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,8,0,1,0,8,0,1]
 ; AVX512DQ-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm30, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm30, %zmm8
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm30
 ; AVX512DQ-SLOW-NEXT:    movb $48, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm6[0],zmm7[0],zmm6[2],zmm7[2],zmm6[4],zmm7[4],zmm6[6],zmm7[6]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm6
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm25, %zmm6
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
 ; AVX512DQ-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm13, %zmm4, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm13, %zmm4, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm13, %zmm28
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k3} = zmm4[0],zmm13[0],zmm4[2],zmm13[2],zmm4[4],zmm13[4],zmm4[6],zmm13[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm25
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm28
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k3} = zmm4[0],zmm5[0],zmm4[2],zmm5[2],zmm4[4],zmm5[4],zmm4[6],zmm5[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm4
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-SLOW-NEXT:    movb $12, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm8 {%k5}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm8 {%k5}
 ; AVX512DQ-SLOW-NEXT:    movb $112, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k7
 ; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm21, %zmm8 {%k7}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm6 {%k1}
 ; AVX512DQ-SLOW-NEXT:    movb $120, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm11 {%k3}
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm4
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = mem[0,1,2,3],ymm4[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $6, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm9 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm9 {%k4}
 ; AVX512DQ-SLOW-NEXT:    movb $56, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k6
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm9 {%k6}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k5}
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm17, %zmm30 {%k7}
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm4
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm30 {%k5}
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm18, %zmm30 {%k7}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm10 {%k2}
 ; AVX512DQ-SLOW-NEXT:    movb $-31, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm10 {%k2}
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm7[0,1,2,3],zmm25[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm7[0,1,2,3],zmm26[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $-61, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm15 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm12 {%k3}
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm28 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm4 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm25 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm12 {%k3}
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm5
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm28 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm28 {%k6}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm23, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm23[1],ymm13[1],ymm23[3],ymm13[3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $14, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm26[1],ymm5[1],ymm26[3],ymm5[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 128(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 128(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 320(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 384(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, 448(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, 512(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 576(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, 576(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 640(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 704(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
@@ -3151,20 +3187,20 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm16
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm30
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm31
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [6,14,6,14,6,14,6,14]
 ; AVX512DQ-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm17
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm27
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm27
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm21 = [6,14,6,14]
 ; AVX512DQ-FAST-NEXT:    # ymm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm17
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm11
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm18
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,8,0,1,0,8,0,1]
 ; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
@@ -3176,17 +3212,19 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    movb $12, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k2
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm2 {%k2}
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [0,8,0,8,0,8,0,8]
+; AVX512DQ-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm24, %zmm12
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm26, %zmm12
 ; AVX512DQ-FAST-NEXT:    movb $112, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
 ; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm12, %zmm2 {%k3}
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm24, %zmm12
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [13,5,13,5,13,5,13,5]
 ; AVX512DQ-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm23, %zmm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm25
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm23, %zmm25
 ; AVX512DQ-FAST-NEXT:    movb $96, %sil
@@ -3194,49 +3232,50 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm25 {%k1}
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [4,12,0,5,4,12,0,5]
 ; AVX512DQ-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm10, %zmm30, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm19
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm10, %zmm31, %zmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm19
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm12, %zmm19
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,1,12,7,0,1,12,7]
-; AVX512DQ-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm31, %zmm27, %zmm19
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,1,12,7,0,1,12,7]
+; AVX512DQ-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm19
 ; AVX512DQ-FAST-NEXT:    movb $120, %sil
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm3, %zmm4
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm22 = xmm22[0],mem[0]
 ; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm22, %zmm0, %zmm4 {%k2}
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm24, %zmm4 {%k3}
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [15,7,15,7,15,7,15,7]
-; AVX512DQ-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm24, %zmm22
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [6,13,14,7,6,13,14,7]
-; AVX512DQ-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm22, %zmm14, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm24, %zmm29
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm26, %zmm4 {%k3}
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm11, %zmm22
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [6,13,14,7,6,13,14,7]
+; AVX512DQ-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm22, %zmm14, %zmm29
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm30
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm6, %zmm30
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [15,7,15,7]
 ; AVX512DQ-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm26, %zmm22
 ; AVX512DQ-FAST-NEXT:    movb $24, %dil
 ; AVX512DQ-FAST-NEXT:    kmovw %edi, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm22 {%k2}
 ; AVX512DQ-FAST-NEXT:    movb $-31, %dil
 ; AVX512DQ-FAST-NEXT:    kmovw %edi, %k3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, %zmm22 {%k3}
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [5,0,14,6,5,0,14,6]
-; AVX512DQ-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm11, %zmm15, %zmm28
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [0,13,6,7,0,13,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k3}
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [5,0,14,6,5,0,14,6]
 ; AVX512DQ-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm14, %zmm28, %zmm29
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm11, %zmm15, %zmm29
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,13,6,7,0,13,6,7]
+; AVX512DQ-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm14, %zmm29, %zmm30
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm16, %zmm1, %zmm20
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm21
 ; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm21[0,1,2,3],zmm20[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    movb $-61, %dil
 ; AVX512DQ-FAST-NEXT:    kmovw %edi, %k3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm20 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm19 {%k3}
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [3,0,12,4,3,0,12,4]
@@ -3244,21 +3283,20 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm21
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm25, %zmm21
 ; AVX512DQ-FAST-NEXT:    movb $48, %sil
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm28
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm28
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm16, %zmm1, %zmm24
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, %zmm23 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm23 {%k1}
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm11, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm27, %zmm12
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm28, %zmm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm12 {%k3}
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm21 {%k3} = zmm3[0],zmm5[0],zmm3[2],zmm5[2],zmm3[4],zmm5[4],zmm3[6],zmm5[6]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %ymm27
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %ymm24
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %ymm28
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,3,7,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %ymm27, %ymm6, %ymm8
+; AVX512DQ-FAST-NEXT:    vpermt2q %ymm24, %ymm6, %ymm8
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    movb $14, %sil
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm16, %zmm25
@@ -3269,23 +3307,23 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpermi2q %ymm23, %ymm8, %ymm6
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm25 {%k3}
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm27[0],ymm28[2],ymm27[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm18[0,1,2,3],zmm17[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [11,3,11,3,11,3,11,3]
-; AVX512DQ-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm24[0],ymm28[2],ymm24[2]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm17[0,1,2,3],zmm27[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [2,10,0,3,2,10,0,3]
 ; AVX512DQ-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm27, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm16
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm27, %zmm16
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm27
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm24, %zmm27
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm16 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm24
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm1 {%k1}
 ; AVX512DQ-FAST-NEXT:    movb $28, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm6[2,3,2,3],zmm31[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm6[2,3,2,3],zmm18[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,0,10,2,1,0,10,2]
 ; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm6, %zmm0
@@ -3301,35 +3339,37 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [9,1,9,1,9,1,9,1]
 ; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm6, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm8
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm6, %zmm8
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,9,0,3,4,9,0,3]
 ; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm6, %zmm11
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm31, %zmm6, %zmm8
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm6, %zmm8
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm6
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = mem[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm0 {%k1}
 ; AVX512DQ-FAST-NEXT:    movb $64, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
 ; AVX512DQ-FAST-NEXT:    movb $56, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm13 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm24, %zmm7
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm7
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm26, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm3 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <u,1,2,3,4,15,u,u>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm30, %zmm3, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm31, %zmm3, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,5,15,u>
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm10, %zmm5, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <13,u,2,3,4,5,6,14>
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm10, %zmm17, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [14,1,2,3,4,5,6,15]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm31, %zmm3, %zmm6
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm18, %zmm3, %zmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,13,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm31, %zmm5, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm18, %zmm5, %zmm3
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
@@ -3362,7 +3402,6 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm11
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm12
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm12
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [6,14,6,14]
@@ -3398,16 +3437,14 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm18
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [15,7,15,7,15,7,15,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm17, %zmm26
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm19
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm17, %zmm19
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $24, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm17
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm16 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = <u,1,2,3,4,15,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm16, %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,1,2,3,4,15,u,u>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm16, %zmm26
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm11
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [0,1,12,7,0,1,12,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
@@ -3420,20 +3457,22 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm16
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm16
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [9,1,9,1,9,1,9,1]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm14, %zmm25, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [9,1,9,1,9,1,9,1]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm14, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm14, %zmm25, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm14, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm28, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm27, %zmm14
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm28, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm27, %zmm18
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,1,2,3,4,5,15,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm24, %zmm27, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <13,u,2,3,4,5,6,14>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm24, %zmm23, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm24, %zmm26, %zmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <13,u,2,3,4,5,6,14>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm24, %zmm23, %zmm26
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %ymm24
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm24[0],ymm15[0],ymm24[2],ymm15[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $28, %sil
@@ -3441,17 +3480,17 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k3} = zmm23[2,3,2,3],zmm30[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [6,13,14,7,6,13,14,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm26, %zmm22, %zmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm22, %zmm28
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,13,6,7,0,13,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm15
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm1, %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm29, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %ymm26
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm31
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm17
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm26[0],ymm15[0],ymm26[2],ymm15[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k3} = zmm23[2,3,2,3],zmm22[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [4,9,0,3,4,9,0,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
@@ -3462,100 +3501,101 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [14,1,2,3,4,5,6,15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm30, %zmm25, %zmm22
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,13,2,3,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm30, %zmm1, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [3,0,12,4,3,0,12,4]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm25, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm12, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm30, %zmm26, %zmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [3,0,12,4,3,0,12,4]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm26, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm12, %zmm9
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [1,0,10,2,1,0,10,2]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm31
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm29
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [1,0,10,2,1,0,10,2]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm8
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $48, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm6[0],zmm7[0],zmm6[2],zmm7[2],zmm6[4],zmm7[4],zmm6[6],zmm7[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm26, %zmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
 ; AVX512BW-ONLY-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm31
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm30
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k3} = zmm4[0],zmm5[0],zmm4[2],zmm5[2],zmm4[4],zmm5[4],zmm4[6],zmm5[6]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $12, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm8 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm9 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm21, %zmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $112, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm12 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm20, %zmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm6 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $120, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm11 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = mem[0,1,2,3],ymm4[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $6, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm9 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm8 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $56, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm8 {%k5}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm10 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $-31, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm10 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,2,3],zmm0[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm7[0,1,2,3],zmm31[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $-61, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm29 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm13 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = mem[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm31 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm31 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm24, %ymm4 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm4 = ymm24[1],mem[1],ymm24[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,2,3,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm4 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm26 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm13 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm30 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm30 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm24[1],ymm15[1],ymm24[3],ymm15[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $14, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm26[1],ymm15[1],ymm26[3],ymm15[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm3 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 64(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 64(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 128(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 256(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 320(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 384(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 512(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 512(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 576(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 704(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 448(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 640(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 768(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 832(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
@@ -3572,35 +3612,35 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm17
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm31
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [6,14,6,14,6,14,6,14]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm23
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm23
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm21 = [6,14,6,14]
 ; AVX512BW-ONLY-FAST-NEXT:    # ymm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm19
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm11
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm10
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm31
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm19
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,8,0,1,0,8,0,1]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %xmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm13 = xmm13[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm14 = xmm14[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512BW-ONLY-FAST-NEXT:    movb $12, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm0, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [0,8,0,8,0,8,0,8]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm14, %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [0,8,0,8,0,8,0,8]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm13, %zmm22
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm14, %zmm22
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm22, %zmm22
 ; AVX512BW-ONLY-FAST-NEXT:    movb $112, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
@@ -3609,113 +3649,115 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm16 = xmm16[0],mem[0]
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm16
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm16, %zmm0, %zmm3 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm30, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm13, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [13,5,13,5,13,5,13,5]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm24, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm24, %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm31, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm14, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm26, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm25, %zmm27
 ; AVX512BW-ONLY-FAST-NEXT:    movb $96, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm26 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm27 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [4,12,0,5,4,12,0,5]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm16, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,1,12,7,0,1,12,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm27, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    movb $120, %sil
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [15,7,15,7,15,7,15,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm23, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [6,13,14,7,6,13,14,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm16, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,1,12,7,0,1,12,7]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm14, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm23, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm25 = [15,7,15,7]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    movb $120, %sil
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm15, %zmm11, %zmm22
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [6,13,14,7,6,13,14,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm13, %zmm29
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm6, %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm24 = [15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # ymm24 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm22
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm24, %zmm22
 ; AVX512BW-ONLY-FAST-NEXT:    movb $24, %dil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %edi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm22 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $-31, %dil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %edi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm22 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [5,0,14,6,5,0,14,6]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm15, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [0,13,6,7,0,13,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [5,0,14,6,5,0,14,6]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm28, %zmm29
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm15, %zmm29
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,13,6,7,0,13,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm29, %zmm30
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm17, %zmm1, %zmm20
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm21
 ; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm21[0,1,2,3],zmm20[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $-61, %dil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %edi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm20 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm13 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [3,0,12,4,3,0,12,4]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm14 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [3,0,12,4,3,0,12,4]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm26, %zmm21
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm27, %zmm21
 ; AVX512BW-ONLY-FAST-NEXT:    movb $48, %sil
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm24, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm24 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm17, %zmm1, %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm25
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm25 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm15, %zmm11, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm27, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm16 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm28, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm16 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm21 {%k3} = zmm4[0],zmm5[0],zmm4[2],zmm5[2],zmm4[4],zmm5[4],zmm4[6],zmm5[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %ymm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %ymm27
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %ymm25
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %ymm26
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm28
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,3,7,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm27, %ymm6, %ymm8
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm26, %ymm6, %ymm8
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $14, %sil
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm17, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm26 {%k3} = zmm12[0],zmm0[0],zmm12[2],zmm0[2],zmm12[4],zmm0[4],zmm12[6],zmm0[6]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm17, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k3} = zmm12[0],zmm0[0],zmm12[2],zmm0[2],zmm12[4],zmm0[4],zmm12[6],zmm0[6]
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm21 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %ymm24, %ymm8, %ymm6
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %ymm25, %ymm8, %ymm6
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm26 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm27[0],ymm28[2],ymm27[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm19[0,1,2,3],zmm18[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [11,3,11,3,11,3,11,3]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [2,10,0,3,2,10,0,3]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm27, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm27 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm26[0],ymm28[2],ymm26[2]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm18[0,1,2,3],zmm23[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [11,3,11,3,11,3,11,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [2,10,0,3,2,10,0,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm26, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm27, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm17 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm26, %zmm17
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm23, %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm17 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm23
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm1 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $28, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm6[2,3,2,3],zmm31[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm6[2,3,2,3],zmm19[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,0,10,2,1,0,10,2]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm12
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm6, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm8[0],ymm24[0],ymm8[2],ymm24[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm6[2,3,2,3],zmm14[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm8[0],ymm25[0],ymm8[2],ymm25[2]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm6[2,3,2,3],zmm13[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm6
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = mem[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $6, %al
@@ -3724,46 +3766,48 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [9,1,9,1,9,1,9,1]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm6, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm8
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm6, %zmm8
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,9,0,3,4,9,0,3]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm6, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm6, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm6, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm6, %zmm8
 ; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm6
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = mem[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm0 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $64, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm18 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm18 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $56, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm12 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm23, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm25, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm24, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <u,1,2,3,4,15,u,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm30, %zmm4, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm4, %zmm5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <u,1,2,3,4,5,15,u>
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm5, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <13,u,2,3,4,5,6,14>
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm18, %zmm5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [14,1,2,3,4,5,6,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm4, %zmm6
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm19, %zmm4, %zmm6
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,13,2,3,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm5, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm19, %zmm5, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 192(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 192(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 256(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 320(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 384(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 512(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 576(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 640(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 448(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 768(%rax)
@@ -3776,7 +3820,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm13
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm5
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm7
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm8
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm9
@@ -3785,19 +3829,18 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm12
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm16 = [15,7,15,7]
-; AVX512DQBW-SLOW-NEXT:    # ymm16 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [6,14,6,14]
+; AVX512DQBW-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm16, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm10
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm15
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm14[0,1,2,3],zmm12[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $64, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k1
@@ -3807,173 +3850,174 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm25
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rax), %zmm22
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm28
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [0,8,0,8,0,8,0,8]
+; AVX512DQBW-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm17, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [13,5,13,5,13,5,13,5]
-; AVX512DQBW-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm18, %zmm21
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    movb $96, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k1
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [4,12,0,5,4,12,0,5]
 ; AVX512DQBW-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm24, %zmm11, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm18, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm24, %zmm11, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm18, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm17, %zmm19
 ; AVX512DQBW-SLOW-NEXT:    movb $24, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm16 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = <u,1,2,3,4,15,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm16, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,1,2,3,4,15,u,u>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm15, %zmm26
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm12, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,1,12,7,0,1,12,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm30, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [11,3,11,3,11,3,11,3]
 ; AVX512DQBW-SLOW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm31, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = [2,10,0,3,2,10,0,3]
-; AVX512DQBW-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm18, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [9,1,9,1,9,1,9,1]
-; AVX512DQBW-SLOW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
-; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm14, %zmm25, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm31, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [2,10,0,3,2,10,0,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm15 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm25, %zmm14, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [5,0,14,6,5,0,14,6]
+; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm14, %zmm25, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm25, %zmm14, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm29, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%r9), %ymm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm29, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm27, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%r9), %ymm13
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm27, %zmm20
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,1,2,3,4,5,15,u>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm24, %zmm27, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <13,u,2,3,4,5,6,14>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm24, %zmm23, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm24, %zmm26, %zmm25
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <13,u,2,3,4,5,6,14>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm24, %zmm23, %zmm26
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %ymm23
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm23[0],ymm5[0],ymm23[2],ymm5[2]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm23[0],ymm13[0],ymm23[2],ymm13[2]
 ; AVX512DQBW-SLOW-NEXT:    movb $28, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k3} = zmm24[2,3,2,3],zmm28[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k3} = zmm24[2,3,2,3],zmm28[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [6,13,14,7,6,13,14,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm26, %zmm22, %zmm29
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm22, %zmm29
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,13,6,7,0,13,6,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %ymm5
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm22, %zmm1, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %ymm26
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm13, %zmm4, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm18 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm26[0],ymm5[0],ymm26[2],ymm5[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k3} = zmm24[2,3,2,3],zmm22[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r8), %ymm1
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm31
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k3} = zmm24[2,3,2,3],zmm22[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [4,9,0,3,4,9,0,3]
 ; AVX512DQBW-SLOW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm24, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm31
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm31
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm24, %zmm20
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [14,1,2,3,4,5,6,15]
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm28, %zmm25, %zmm22
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [0,13,2,3,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm28, %zmm1, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm28, %zmm26, %zmm24
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [1,0,10,2,1,0,10,2]
 ; AVX512DQBW-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [3,0,12,4,3,0,12,4]
-; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [3,0,12,4,3,0,12,4]
+; AVX512DQBW-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm28, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm25, %zmm2
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,8,0,1,0,8,0,1]
 ; AVX512DQBW-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm30, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm30, %zmm8
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm30
 ; AVX512DQBW-SLOW-NEXT:    movb $48, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm6[0],zmm7[0],zmm6[2],zmm7[2],zmm6[4],zmm7[4],zmm6[6],zmm7[6]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm25, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
 ; AVX512DQBW-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm13, %zmm4, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm13, %zmm4, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm13, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k3} = zmm4[0],zmm13[0],zmm4[2],zmm13[2],zmm4[4],zmm13[4],zmm4[6],zmm13[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm25
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm28
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k3} = zmm4[0],zmm5[0],zmm4[2],zmm5[2],zmm4[4],zmm5[4],zmm4[6],zmm5[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQBW-SLOW-NEXT:    movb $12, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k5
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm8 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm8 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    movb $112, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k7
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm21, %zmm8 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm6 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movb $120, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm11 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm4
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = mem[0,1,2,3],ymm4[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $6, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm9 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm9 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    movb $56, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm9 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm17, %zmm30 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm30 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm18, %zmm30 {%k7}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm10 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    movb $-31, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm10 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm7[0,1,2,3],zmm25[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm7[0,1,2,3],zmm26[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $-61, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm15 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm12 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm28 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm4 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm25 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm12 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm5
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm28 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm28 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm23, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,3,3]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm23[1],ymm13[1],ymm23[3],ymm13[3]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,2,3,3]
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $14, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm26[1],ymm5[1],ymm26[3],ymm5[3]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,3,3]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, 128(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 128(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, 320(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 384(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, 448(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, 512(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 576(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, 576(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 640(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 704(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
@@ -3993,20 +4037,20 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm16
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm30
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm31
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [6,14,6,14,6,14,6,14]
 ; AVX512DQBW-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm17
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm27
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm27
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm21 = [6,14,6,14]
 ; AVX512DQBW-FAST-NEXT:    # ymm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm17
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm11
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm10
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm15
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rax), %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rax), %zmm31
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rax), %zmm18
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,8,0,1,0,8,0,1]
 ; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
@@ -4018,17 +4062,19 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    movb $12, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k2
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm2 {%k2}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [0,8,0,8,0,8,0,8]
+; AVX512DQBW-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm24, %zmm12
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm26, %zmm12
 ; AVX512DQBW-FAST-NEXT:    movb $112, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
 ; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm12, %zmm2 {%k3}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm24, %zmm12
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [13,5,13,5,13,5,13,5]
 ; AVX512DQBW-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm23, %zmm12
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm25
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm23, %zmm25
 ; AVX512DQBW-FAST-NEXT:    movb $96, %sil
@@ -4036,49 +4082,50 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm25 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [4,12,0,5,4,12,0,5]
 ; AVX512DQBW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm10, %zmm30, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm19
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm10, %zmm31, %zmm26
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm19
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm12, %zmm19
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [0,1,12,7,0,1,12,7]
-; AVX512DQBW-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm31, %zmm27, %zmm19
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,1,12,7,0,1,12,7]
+; AVX512DQBW-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm19
 ; AVX512DQBW-FAST-NEXT:    movb $120, %sil
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm3, %zmm4
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm22 = xmm22[0],mem[0]
 ; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm22, %zmm0, %zmm4 {%k2}
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm24, %zmm4 {%k3}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [15,7,15,7,15,7,15,7]
-; AVX512DQBW-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm24, %zmm22
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [6,13,14,7,6,13,14,7]
-; AVX512DQBW-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm22, %zmm14, %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm24, %zmm29
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm26, %zmm4 {%k3}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm15, %zmm11, %zmm22
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [6,13,14,7,6,13,14,7]
+; AVX512DQBW-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm22, %zmm14, %zmm29
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm30
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm6, %zmm30
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [15,7,15,7]
 ; AVX512DQBW-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm26, %zmm22
 ; AVX512DQBW-FAST-NEXT:    movb $24, %dil
 ; AVX512DQBW-FAST-NEXT:    kmovd %edi, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm22 {%k2}
 ; AVX512DQBW-FAST-NEXT:    movb $-31, %dil
 ; AVX512DQBW-FAST-NEXT:    kmovd %edi, %k3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, %zmm22 {%k3}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [5,0,14,6,5,0,14,6]
-; AVX512DQBW-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm11, %zmm15, %zmm28
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [0,13,6,7,0,13,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k3}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [5,0,14,6,5,0,14,6]
 ; AVX512DQBW-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm14, %zmm28, %zmm29
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm11, %zmm15, %zmm29
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [0,13,6,7,0,13,6,7]
+; AVX512DQBW-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm14, %zmm29, %zmm30
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm16, %zmm1, %zmm20
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm21
 ; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm21[0,1,2,3],zmm20[4,5,6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $-61, %dil
 ; AVX512DQBW-FAST-NEXT:    kmovd %edi, %k3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm20 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm19 {%k3}
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [3,0,12,4,3,0,12,4]
@@ -4086,21 +4133,20 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm21
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm25, %zmm21
 ; AVX512DQBW-FAST-NEXT:    movb $48, %sil
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm28
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm28
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm16, %zmm1, %zmm24
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, %zmm23 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm23 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm15, %zmm11, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm27, %zmm12
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm28, %zmm12
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm12 {%k3}
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm21 {%k3} = zmm3[0],zmm5[0],zmm3[2],zmm5[2],zmm3[4],zmm5[4],zmm3[6],zmm5[6]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %ymm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %ymm27
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %ymm24
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %ymm28
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,3,7,7]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %ymm28, %ymm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm27, %ymm6, %ymm8
+; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm24, %ymm6, %ymm8
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $14, %sil
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm16, %zmm25
@@ -4111,23 +4157,23 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %ymm23, %ymm8, %ymm6
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm25 {%k3}
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm27[0],ymm28[2],ymm27[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm18[0,1,2,3],zmm17[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [11,3,11,3,11,3,11,3]
-; AVX512DQBW-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm28[0],ymm24[0],ymm28[2],ymm24[2]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm17[0,1,2,3],zmm27[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [2,10,0,3,2,10,0,3]
 ; AVX512DQBW-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm27, %zmm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm16
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm27, %zmm16
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm27
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm24, %zmm27
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm16 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm24
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm1 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movb $28, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm6[2,3,2,3],zmm31[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm6[2,3,2,3],zmm18[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,0,10,2,1,0,10,2]
 ; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm6, %zmm0
@@ -4143,35 +4189,37 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [9,1,9,1,9,1,9,1]
 ; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm6, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm8
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm6, %zmm8
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,9,0,3,4,9,0,3]
 ; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm6, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm31, %zmm6, %zmm8
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm6, %zmm8
 ; AVX512DQBW-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm6
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = mem[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm0 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movb $64, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movb $56, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm13 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm24, %zmm7
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm7
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm26, %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm3 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <u,1,2,3,4,15,u,u>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm30, %zmm3, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm31, %zmm3, %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,5,15,u>
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm10, %zmm5, %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <13,u,2,3,4,5,6,14>
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm10, %zmm17, %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [14,1,2,3,4,5,6,15]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm31, %zmm3, %zmm6
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm18, %zmm3, %zmm6
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,13,2,3,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm31, %zmm5, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm18, %zmm5, %zmm3
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
@@ -5754,246 +5802,250 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-ONLY-SLOW-LABEL: store_i64_stride7_vf32:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $2120, %rsp # imm = 0x848
+; AVX512F-ONLY-SLOW-NEXT:    subq $1928, %rsp # imm = 0x788
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm5
 ; AVX512F-ONLY-SLOW-NEXT:    movb $96, %r10b
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [9,1,9,1,9,1,9,1]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,9,0,3,4,9,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [11,3,11,3,11,3,11,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,10,0,3,2,10,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [11,3,11,3,11,3,11,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [2,10,0,3,2,10,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm8, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm12
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm10[0],ymm1[2],ymm10[2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[2],ymm8[2]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $28, %r10b
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[2,3,2,3],zmm2[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,12,0,5,4,12,0,5]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [5,0,14,6,5,0,14,6]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,0,5,4,12,0,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,12,7,0,1,12,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm22, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,13,6,7,0,13,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [15,7,15,7,15,7,15,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [6,13,14,7,6,13,14,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,13,6,7,0,13,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm17, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm16, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm12[0],ymm6[0],ymm12[2],ymm6[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k2} = zmm0[2,3,2,3],zmm5[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm10, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm22, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [6,13,14,7,6,13,14,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm18, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm19, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm13[0],ymm12[0],ymm13[2],ymm12[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k2} = zmm1[2,3,2,3],zmm5[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm14, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm8, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm17, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm17, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm16, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm19, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm13 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm16[0],ymm0[2],ymm16[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k2} = zmm0[2,3,2,3],zmm29[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm20, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm3, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm15, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [13,5,13,5,13,5,13,5]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm24, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm17, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm5[0],ymm17[0],ymm5[2],ymm17[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k2} = zmm5[2,3,2,3],zmm29[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm5, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm19, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm2, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm3, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm14, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm12, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm25, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,0,12,4,3,0,12,4]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm3, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm24, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm4, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm24, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm20, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm26, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm3, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm28, %zmm5, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm4, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm18, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm3, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm3, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm20, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm25, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm20, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm25, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm28, %zmm1, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm28, %zmm1, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm28
 ; AVX512F-ONLY-SLOW-NEXT:    movb $48, %r10b
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k3
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [1,0,10,2,1,0,10,2]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm4, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k3} = zmm8[0],zmm27[0],zmm8[2],zmm27[2],zmm8[4],zmm27[4],zmm8[6],zmm27[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm4, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k3} = zmm9[0],zmm23[0],zmm9[2],zmm23[2],zmm9[4],zmm23[4],zmm9[6],zmm23[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,8,0,1,0,8,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm5, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [15,7,15,7]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm10, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm4, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm25 {%k3} = zmm11[0],zmm13[0],zmm11[2],zmm13[2],zmm11[4],zmm13[4],zmm11[6],zmm13[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm5, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm1, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,6,14]
+; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm8, %zmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm5, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm10, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm4, %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm22 {%k3} = zmm10[0],zmm11[0],zmm10[2],zmm11[2],zmm10[4],zmm11[4],zmm10[6],zmm11[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm5, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm8, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm11
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm6[0],zmm19[0],zmm6[2],zmm19[2],zmm6[4],zmm19[4],zmm6[6],zmm19[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm5, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm10, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm21 {%k3} = zmm22[0],zmm0[0],zmm22[2],zmm0[2],zmm22[4],zmm0[4],zmm22[6],zmm0[6]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm22, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm22, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm4, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm18, %zmm15, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm18, %zmm15, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm15[0],zmm18[0],zmm15[2],zmm18[2],zmm15[4],zmm18[4],zmm15[6],zmm18[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm5, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm8, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k3} = zmm11[0],zmm0[0],zmm11[2],zmm0[2],zmm11[4],zmm0[4],zmm11[6],zmm0[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm11, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm0, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [11,3,11,3,11,3,11,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm11, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm11
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -6002,1396 +6054,1399 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm17 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,0,8,0,8,0,8]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm10, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm8, %zmm8
 ; AVX512F-ONLY-SLOW-NEXT:    movb $112, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm17 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm11 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm10, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm6 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 128(%rax), %zmm10, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm6 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm5 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm19, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm23 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm8, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm23 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm15 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 128(%rax), %zmm8, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm15 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm5 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm8, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $120, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm7 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = zmm14[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm21 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm21 = zmm12[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $-61, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm30 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm1 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm3 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm26 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $24, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm3 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = zmm12[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm9 = zmm9[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm24[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm25[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $-31, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm9 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    movb $6, %sil
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm6 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    movb $6, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm15 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm27 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm4 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm16 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm31 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm13 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $56, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm19, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm10 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,11,u,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm21, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,11,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm12, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%r8), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm12[0],mem[0],ymm12[2],mem[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm12[2,3,2,3],zmm0[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm22 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [9,1,9,1,9,1,9,1]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm8, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm24, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = zmm24[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <0,11,u,u,4,5,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm1, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,11,u,4,5,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm18, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm18 = ymm18[0],mem[0],ymm18[2],mem[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k2} = zmm18[2,3,2,3],zmm2[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm11 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq (%rsp), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq (%rsp), %ymm12, %ymm18 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm18 = ymm12[1],mem[1],ymm12[3],mem[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm18[0,2,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $14, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm18 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm27 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm25 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm22 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $64, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm10 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm3 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $8, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm28 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm13, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <u,1,2,3,4,15,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm22, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,12,u,3,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm12, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm10, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <u,1,2,3,4,5,15,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm13, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [0,1,12,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm14, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,13,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [14,1,2,3,4,5,6,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm10, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm10, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <u,1,2,3,4,15,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm11, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,12,u,3,4,5,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm12, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <13,u,2,3,4,5,6,14>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm3, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,5,15,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm10, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,12,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,13,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm11, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [14,1,2,3,4,5,6,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm11
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 1472(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 1472(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 1408(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 1280(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1216(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 1280(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 1216(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 960(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 832(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 768(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1152(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 960(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 832(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 768(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 704(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 576(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 512(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 320(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 512(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 384(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 320(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 64(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 64(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 1344(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 1088(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 896(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 448(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 192(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 896(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 640(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 448(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 192(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 1728(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 1664(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 1600(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 1536(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $2120, %rsp # imm = 0x848
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 1728(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 1664(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 1600(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1536(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    addq $1928, %rsp # imm = 0x788
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: store_i64_stride7_vf32:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $2024, %rsp # imm = 0x7E8
+; AVX512F-ONLY-FAST-NEXT:    subq $1896, %rsp # imm = 0x768
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    movb $96, %r10b
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [9,1,9,1,9,1,9,1]
-; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,9,0,3,4,9,0,3]
-; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [9,1,9,1,9,1,9,1]
+; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [11,3,11,3,11,3,11,3]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm19
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [2,10,0,3,2,10,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm3, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm20
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%r9), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm1[0],ymm19[2],ymm1[2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%r9), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm2[0],ymm8[2],ymm2[2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    movb $28, %r10b
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm3[2,3,2,3],zmm4[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm3[2,3,2,3],zmm5[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,3,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm2, %ymm5, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,12,0,5,4,12,0,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm3, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,1,12,7,0,1,12,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm7, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,13,6,7,0,13,6,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm3, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [6,13,14,7,6,13,14,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm22[0],ymm4[0],ymm22[2],ymm4[2]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm4, %ymm5, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm21[0],ymm1[0],ymm21[2],ymm1[2]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm1, %ymm5, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm19, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[2,3,2,3],zmm30[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,3,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm1, %ymm3, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm19, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,12,0,5,4,12,0,5]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm31
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm4, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,1,12,7,0,1,12,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm5, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm13, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm17, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [5,0,14,6,5,0,14,6]
-; AVX512F-ONLY-FAST-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,13,6,7,0,13,6,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm4, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm18, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm7, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [15,7,15,7,15,7,15,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [6,13,14,7,6,13,14,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm23[0],ymm6[0],ymm23[2],ymm6[2]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm6, %ymm3, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm22[0],ymm2[0],ymm22[2],ymm2[2]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm2, %ymm3, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm16, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[2,3,2,3],zmm18[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm19, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k2} = zmm4[2,3,2,3],zmm24[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm1, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm7, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm13, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm5, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm17, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm11, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm5, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm9, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [3,0,12,4,3,0,12,4]
+; AVX512F-ONLY-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm29, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm19, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm21, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm29, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm19, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm4, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm21, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm6[2,3,2,3],zmm11[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm1, %zmm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm2, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm5, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm14, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm4, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm9, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm29, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm28, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [3,0,12,4,3,0,12,4]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm19, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm26, %zmm0, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm26, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm26, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm22, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    movb $48, %r10b
+; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k3
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
+; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm6, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,0,10,2,1,0,10,2]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm5, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm5, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm11[0],zmm16[0],zmm11[2],zmm16[2],zmm11[4],zmm16[4],zmm11[6],zmm16[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm28, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    movb $48, %r10b
-; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k3
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [0,8,0,1,0,8,0,1]
-; AVX512F-ONLY-FAST-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm31, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [1,0,10,2,1,0,10,2]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm7[0],zmm17[0],zmm7[2],zmm17[2],zmm7[4],zmm17[4],zmm7[6],zmm17[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm1, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm8, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [15,7,15,7]
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm2, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm5, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm31, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm4, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm30[0],zmm15[0],zmm30[2],zmm15[2],zmm30[4],zmm15[4],zmm30[6],zmm15[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm1, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm8, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm6, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm5, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm15, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm24, %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm24, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm28, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm1, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [6,14,6,14]
+; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm3, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm9, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm6, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm5, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm3, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm9, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm8, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm31, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm4, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm26[0],zmm3[0],zmm26[2],zmm3[2],zmm26[4],zmm3[4],zmm26[6],zmm3[6]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm26, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm26, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm12 {%k3} = zmm19[0],zmm0[0],zmm19[2],zmm0[2],zmm19[4],zmm0[4],zmm19[6],zmm0[6]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm19, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm19, %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm19, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm6, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm5, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm25[0],zmm8[0],zmm25[2],zmm8[2],zmm25[4],zmm8[4],zmm25[6],zmm8[6]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm8, %zmm25, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm8, %zmm25, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm9, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm14[0],zmm0[0],zmm14[2],zmm0[2],zmm14[4],zmm0[4],zmm14[6],zmm0[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm14, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm0, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm14, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    movb $12, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm25 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k3}
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,0,8,0,8,0,8]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm8, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    movb $112, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm25 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm16 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm3, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm16 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm28 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 128(%rax), %zmm3, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm28 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm31 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm22, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm20 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm16 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm0, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm8, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm16 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm10 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm0, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 128(%rax), %zmm8, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm6 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm31, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k4}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $14, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm29 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $120, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k4}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm13 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm4 = zmm15[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm17 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm28 {%k3}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm5 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm29 {%k3}
 ; AVX512F-ONLY-FAST-NEXT:    movb $-61, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k5}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm3 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm27 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k4}
 ; AVX512F-ONLY-FAST-NEXT:    movb $24, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k3}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm1 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm10[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm0 = zmm17[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm21[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k5}
 ; AVX512F-ONLY-FAST-NEXT:    movb $-31, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm30 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm26 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm27 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm23 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm25 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm25 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $6, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm21 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm4 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm18 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm11 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm7 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm5 {%k4}
 ; AVX512F-ONLY-FAST-NEXT:    movb $56, %cl
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %ecx, %k4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm22, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm18 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm7 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [9,1,9,1,9,1,9,1]
+; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm31, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm12 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm15 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,11,u,u,4,5,6,7>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm4 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm8 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm8 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,11,u,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm12, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,1,11,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm9, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm9[0],mem[0],ymm9[2],mem[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k2} = zmm9[2,3,2,3],zmm0[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm9, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,11,u,4,5,6,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm8, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm8[0],mem[0],ymm8[2],mem[2]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k2} = zmm8[2,3,2,3],zmm2[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    movb $64, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm8 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm3 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $8, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm19 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <12,u,u,3,4,5,6,13>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm6, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,1,2,3,4,15,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm19, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,12,u,3,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm9, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <13,u,2,3,4,5,6,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm8, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <u,1,2,3,4,5,15,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm6, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,12,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,13,2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [14,1,2,3,4,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm8, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm14 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <12,u,u,3,4,5,6,13>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm12, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <u,1,2,3,4,15,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm14, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,12,u,3,4,5,6,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm8, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <13,u,2,3,4,5,6,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm3, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,5,15,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm12, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [0,1,12,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm14, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [0,13,2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm8, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [14,1,2,3,4,5,6,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 1472(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1408(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 1280(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 1216(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 1152(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 1088(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 1472(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 1408(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 1280(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 1216(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 1152(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 1088(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, 1024(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 960(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 832(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 768(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 960(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 832(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 768(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 704(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 640(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 640(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 576(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 512(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 512(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 384(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 192(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 1344(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 896(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 1344(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 896(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 448(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 1664(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 1600(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 1536(%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $2024, %rsp # imm = 0x7E8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 1728(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 1664(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 1600(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 1536(%rax)
+; AVX512F-ONLY-FAST-NEXT:    addq $1896, %rsp # imm = 0x768
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: store_i64_stride7_vf32:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $2120, %rsp # imm = 0x848
+; AVX512DQ-SLOW-NEXT:    subq $1928, %rsp # imm = 0x788
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm15
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm19
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rax), %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rax), %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm7
 ; AVX512DQ-SLOW-NEXT:    movb $96, %r10b
 ; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [11,3,11,3,11,3,11,3]
 ; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [2,10,0,3,2,10,0,3]
-; AVX512DQ-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [2,10,0,3,2,10,0,3]
+; AVX512DQ-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm8, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm18
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm9, (%rsp) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %ymm11
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r8), %ymm12
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm9[0],ymm0[2],ymm9[2]
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[2],ymm8[2]
 ; AVX512DQ-SLOW-NEXT:    movb $28, %r10b
 ; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,0,5,4,12,0,5]
-; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,12,0,5,4,12,0,5]
+; AVX512DQ-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,12,7,0,1,12,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [5,0,14,6,5,0,14,6]
-; AVX512DQ-SLOW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm19
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [5,0,14,6,5,0,14,6]
+; AVX512DQ-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm22
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm19
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,13,6,7,0,13,6,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [15,7,15,7,15,7,15,7]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm22
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [6,13,14,7,6,13,14,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm18, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm17, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k2} = zmm3[2,3,2,3],zmm7[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm16, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm18, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm4[2,3,2,3],zmm7[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm16, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm13, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm11
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm8, %zmm11
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm11
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm11
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm11
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm4, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm3, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm27
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm31, %zmm18, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm31, %zmm16, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm14
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm23, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm7 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm18, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm29
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%r9), %ymm11
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%r8), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm11[0],ymm3[2],ymm11[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm3[2,3,2,3],zmm29[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%r8), %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm11[0],ymm4[2],ymm11[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm4[2,3,2,3],zmm29[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm3, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm7, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm4, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm7, %zmm21
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm21
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm16, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm29
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [13,5,13,5,13,5,13,5]
-; AVX512DQ-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm25, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm3, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm9, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm29
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm1
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,0,12,4,3,0,12,4]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm3, %zmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm18
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm25, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm25, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm2, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm30, %zmm17, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm4, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm25
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm26
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm21, %zmm26
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm9, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm21, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm23
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm30, %zmm14, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm30, %zmm14, %zmm21
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm30
 ; AVX512DQ-SLOW-NEXT:    movb $48, %r10b
 ; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k3
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [1,0,10,2,1,0,10,2]
 ; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm4, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm10[0],zmm15[0],zmm10[2],zmm15[2],zmm10[4],zmm15[4],zmm10[6],zmm15[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm14
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm4, %zmm14
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm15[0],zmm17[0],zmm15[2],zmm17[2],zmm15[4],zmm17[4],zmm15[6],zmm17[6]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm14
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,0,1,0,8,0,1]
-; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm2, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm18
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm1, %zmm7
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [15,7,15,7]
-; AVX512DQ-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm6, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm4, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm8[0],zmm13[0],zmm8[2],zmm13[2],zmm8[4],zmm13[4],zmm8[6],zmm13[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm6, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm19
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,8,0,1,0,8,0,1]
+; AVX512DQ-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm13
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm1, %zmm12
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,6,14]
+; AVX512DQ-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm5
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm7, %zmm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm17
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm4, %zmm17
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k3} = zmm10[0],zmm24[0],zmm10[2],zmm24[2],zmm10[4],zmm24[4],zmm10[6],zmm24[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm22
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm19
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm22
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm2, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm7, %zmm19
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm15
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm31, %zmm24, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm31, %zmm24, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k3} = zmm24[0],zmm31[0],zmm24[2],zmm31[2],zmm24[4],zmm31[4],zmm24[6],zmm31[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm31, %zmm2, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm31, %zmm6, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm16 {%k3} = zmm19[0],zmm0[0],zmm19[2],zmm0[2],zmm19[4],zmm0[4],zmm19[6],zmm0[6]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm19, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm19, %zmm0, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm28
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm24
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm24
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm31, %zmm27, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm31, %zmm27, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k3} = zmm27[0],zmm31[0],zmm27[2],zmm31[2],zmm27[4],zmm31[4],zmm27[6],zmm31[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm31, %zmm3, %zmm27
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm31, %zmm7, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm19, %zmm31
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm31 {%k3} = zmm15[0],zmm0[0],zmm15[2],zmm0[2],zmm15[4],zmm0[4],zmm15[6],zmm0[6]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm15, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm15, %zmm0, %zmm4
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-SLOW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm15, %zmm31
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm15
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm12 {%k1}
 ; AVX512DQ-SLOW-NEXT:    movb $120, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm0 {%k3}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm18 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm18 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm25 {%k3}
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm5[0,1,2,3],zmm26[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $-61, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k4}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm23 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm22 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm26 {%k3}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0 {%k3}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k3}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm1 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm22 {%k3}
 ; AVX512DQ-SLOW-NEXT:    movb $24, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k3}
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm3 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm3 = zmm5[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k3}
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm16[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k4}
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm25[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm21[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
 ; AVX512DQ-SLOW-NEXT:    movb $-31, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm21 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k4}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm9 {%k3}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm20 {%k3}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm20 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    movb $12, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm14 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm13 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [0,8,0,8,0,8,0,8]
 ; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm5
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm10 {%k4}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm5
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm24 {%k4}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm5
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm2 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm27 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k4}
 ; AVX512DQ-SLOW-NEXT:    movb $112, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm6, %zmm14 {%k4}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm5 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm5, %zmm10 {%k4}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm5 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 128(%rax), %zmm5, %zmm24 {%k4}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm11
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm5, %zmm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 192(%rax), %zmm1, %zmm2 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm6, %zmm13 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm2, %zmm10 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 128(%rax), %zmm2, %zmm27 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm9
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm9, %zmm2, %zmm1
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 192(%rax), %zmm1, %zmm3 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $6, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm20 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm14 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm15 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm17 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm8 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
 ; AVX512DQ-SLOW-NEXT:    movb $56, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm20 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm15 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm8 {%k4}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm5, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k4}
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm9, %zmm2, %zmm6
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [4,9,0,3,4,9,0,3]
+; AVX512DQ-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm6
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm23 {%k1}
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm7 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm7 = zmm27[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,11,u,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm16, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <0,1,11,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm22, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %ymm16
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm16 = ymm16[0],mem[0],ymm16[2],mem[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k2} = zmm16[2,3,2,3],zmm1[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm19 {%k3}
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq (%rsp), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm11 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm16 {%k1}
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm6 = zmm28[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,11,u,u,4,5,6,7>
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,1,11,u,4,5,6,7>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm9, %zmm7, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%r8), %ymm7
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm7[0],mem[0],ymm7[2],mem[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k2} = zmm7[2,3,2,3],zmm1[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm15 {%k3}
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpunpckhqdq (%rsp), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQ-SLOW-NEXT:    movb $14, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm16 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm28 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm21 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm23 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3]
 ; AVX512DQ-SLOW-NEXT:    movb $64, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k2}
 ; AVX512DQ-SLOW-NEXT:    movb $8, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k2}
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm30 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm13, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <u,1,2,3,4,15,u,u>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm19, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,12,u,3,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm12, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm7, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,1,2,3,4,5,15,u>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm13, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,1,12,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm5, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,13,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm12, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [14,1,2,3,4,5,6,15]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm7, %zmm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm8 {%k2}
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm30 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <12,u,u,3,4,5,6,13>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm11, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <u,1,2,3,4,15,u,u>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm15, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,12,u,3,4,5,6,7>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm9, %zmm7, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <13,u,2,3,4,5,6,14>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm9, %zmm6, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,1,2,3,4,5,15,u>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm9, %zmm11, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,1,12,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,13,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm7, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [14,1,2,3,4,5,6,15]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm7
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 1472(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 1472(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 1408(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 1344(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, 1280(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 1344(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 1280(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 1216(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1152(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, 1152(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 960(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, 896(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 832(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 768(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, 960(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, 896(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 832(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 768(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, 704(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, 640(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 640(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 576(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, 512(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 512(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 448(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 192(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 384(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 320(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, 256(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 192(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 1728(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 1664(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 1600(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 1536(%rax)
-; AVX512DQ-SLOW-NEXT:    addq $2120, %rsp # imm = 0x848
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, 64(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, (%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 1728(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 1664(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, 1600(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 1536(%rax)
+; AVX512DQ-SLOW-NEXT:    addq $1928, %rsp # imm = 0x788
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: store_i64_stride7_vf32:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $2056, %rsp # imm = 0x808
+; AVX512DQ-FAST-NEXT:    subq $1864, %rsp # imm = 0x748
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm19
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm24
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm13
 ; AVX512DQ-FAST-NEXT:    movb $96, %r10b
 ; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [9,1,9,1,9,1,9,1]
-; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,9,0,3,4,9,0,3]
-; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm12
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [11,3,11,3,11,3,11,3]
-; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
+; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm1, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [2,10,0,3,2,10,0,3]
 ; AVX512DQ-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%r9), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %ymm22
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm7[0],ymm1[0],ymm7[2],ymm1[2]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm3, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%r9), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %ymm16
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %ymm22
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %ymm21
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm16[0],ymm3[0],ymm16[2],ymm3[2]
 ; AVX512DQ-FAST-NEXT:    movb $28, %r10b
 ; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm2[2,3,2,3],zmm8[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,3,7,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %ymm1, %ymm4, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm1[2,3,2,3],zmm13[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,3,7,7]
+; AVX512DQ-FAST-NEXT:    vpermt2q %ymm3, %ymm1, %ymm16
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,12,0,5,4,12,0,5]
+; AVX512DQ-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm3, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm16
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,1,12,7,0,1,12,7]
+; AVX512DQ-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm3, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [5,0,14,6,5,0,14,6]
+; AVX512DQ-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm23, %zmm5
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [0,13,6,7,0,13,6,7]
+; AVX512DQ-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm17, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm28, %zmm2
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [6,13,14,7,6,13,14,7]
+; AVX512DQ-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm22[0],ymm4[0],ymm22[2],ymm4[2]
+; AVX512DQ-FAST-NEXT:    vpermt2q %ymm4, %ymm1, %ymm22
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm21[0],ymm0[0],ymm21[2],ymm0[2]
+; AVX512DQ-FAST-NEXT:    vpermt2q %ymm0, %ymm1, %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm20, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm25
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm2[2,3,2,3],zmm25[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm13
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,1,12,7,0,1,12,7]
-; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm7, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm25, %zmm15, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [5,0,14,6,5,0,14,6]
-; AVX512DQ-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm24, %zmm1
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,13,6,7,0,13,6,7]
-; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm16, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm25, %zmm3, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm25, %zmm17, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [15,7,15,7,15,7,15,7]
-; AVX512DQ-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm26, %zmm0
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [6,13,14,7,6,13,14,7]
-; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm23[0],ymm5[0],ymm23[2],ymm5[2]
-; AVX512DQ-FAST-NEXT:    vpermt2q %ymm5, %ymm4, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm22[0],ymm3[0],ymm22[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vpermt2q %ymm3, %ymm4, %ymm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm19, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm14
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm0[2,3,2,3],zmm14[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm12, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm13, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm7, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm24, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm26, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm19, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rax), %zmm12
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 {%k2} = zmm8[2,3,2,3],zmm12[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm8
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm0, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm8, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm11, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm30, %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm18, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rax), %zmm21
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm5[2,3,2,3],zmm21[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm26, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [3,0,12,4,3,0,12,4]
-; AVX512DQ-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm15, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm5
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm1, %zmm16
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm16
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm5, %zmm23
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm17, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm28, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm14, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm4, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm30, %zmm21
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [3,0,12,4,3,0,12,4]
+; AVX512DQ-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm30, %zmm3
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm14, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-FAST-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm16, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [15,7,15,7,15,7,15,7]
 ; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm30, %zmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm14, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm9, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm16, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm26, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm16, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm30, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm27, %zmm0, %zmm30
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm27, %zmm14
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm27, %zmm16
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm27
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm23, %zmm20
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm23
 ; AVX512DQ-FAST-NEXT:    movb $48, %r10b
 ; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k3
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,8,0,1,0,8,0,1]
 ; AVX512DQ-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm11, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm22
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm11, %zmm22
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,0,10,2,1,0,10,2]
 ; AVX512DQ-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm31
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm8, %zmm31
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm10[0],zmm16[0],zmm10[2],zmm16[2],zmm10[4],zmm16[4],zmm10[6],zmm16[6]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm7
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm1, %zmm7
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm4, %zmm24
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [15,7,15,7]
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm15, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm1, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm30
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm9, %zmm30
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm26, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm11, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm8, %zmm17
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm22 {%k3} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm1, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm4, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm15, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm18, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm26, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm13, %zmm5, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm13, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm28
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm26, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm20
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm8, %zmm20
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm9[0],zmm19[0],zmm9[2],zmm19[2],zmm9[4],zmm19[4],zmm9[6],zmm19[6]
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm1, %zmm4
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [6,14,6,14]
+; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm6
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm5, %zmm6
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm19
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm11, %zmm19
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm17
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm8, %zmm17
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm31 {%k3} = zmm12[0],zmm24[0],zmm12[2],zmm24[2],zmm12[4],zmm24[4],zmm12[6],zmm24[6]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm5, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm10, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm24
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm4, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm11, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm8, %zmm5
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm15 {%k3} = zmm27[0],zmm6[0],zmm27[2],zmm6[2],zmm27[4],zmm6[4],zmm27[6],zmm6[6]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm27, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm27, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm2, %zmm27
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm16 {%k3} = zmm20[0],zmm0[0],zmm20[2],zmm0[2],zmm20[4],zmm0[4],zmm20[6],zmm0[6]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm20, %zmm11
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm20, %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm20, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm2, %zmm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm29
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm18
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm26
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm11, %zmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm9
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm8, %zmm9
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k3} = zmm15[0],zmm13[0],zmm15[2],zmm13[2],zmm15[4],zmm13[4],zmm15[6],zmm13[6]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm13, %zmm15, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm13, %zmm15, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm10, %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm13 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm13 {%k3} = zmm12[0],zmm0[0],zmm12[2],zmm0[2],zmm12[4],zmm0[4],zmm12[6],zmm0[6]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, (%rsp) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm11
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm12, %zmm0, %zmm8
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm13
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm12
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    movb $14, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm3 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512DQ-FAST-NEXT:    movb $120, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k4
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k4}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm24, %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm2 = zmm24[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm4 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm4 = zmm6[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm22 {%k3}
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm31 {%k3}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm15 {%k3}
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k3}
 ; AVX512DQ-FAST-NEXT:    movb $-61, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k5
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k5}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm3 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm7 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k5}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    movb $24, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k3}
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm23[0,1,2,3],zmm30[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k3}
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm1 = zmm7[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k5}
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,2,3],zmm9[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k5}
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm5[0,1,2,3],zmm16[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k5}
 ; AVX512DQ-FAST-NEXT:    movb $-31, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm28 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm27 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm27 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm28 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm24 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm24 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm15 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm15 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-FAST-NEXT:    movb $12, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm29 {%k4}
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm22 {%k4}
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
+; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm3
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
@@ -7406,363 +7461,371 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm11 {%k4}
 ; AVX512DQ-FAST-NEXT:    movb $112, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k4
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm6, %zmm29 {%k4}
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm5, %zmm22 {%k4}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm3 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm3, %zmm19 {%k4}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm3 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 128(%rax), %zmm3, %zmm26 {%k4}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r8), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %zmm6
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 192(%rax), %zmm0, %zmm11 {%k4}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %zmm5
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 192(%rax), %zmm2, %zmm11 {%k4}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    movb $6, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm31 {%k4}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm17 {%k4}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm5 {%k4}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k4}
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm20 {%k4}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm17 {%k4}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm9 {%k4}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm8 {%k4}
 ; AVX512DQ-FAST-NEXT:    movb $56, %cl
 ; AVX512DQ-FAST-NEXT:    kmovw %ecx, %k4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm3, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rax), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm20 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9 {%k4}
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm3, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rax), %zmm2
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [4,9,0,3,4,9,0,3]
+; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm18 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k1}
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21, %zmm9 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm9 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,11,u,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm16, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm6 = zmm29[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,11,u,u,4,5,6,7>
+; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm7 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm7, %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <0,1,11,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm12, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%r8), %ymm12
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm12[0],mem[0],ymm12[2],mem[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm12[2,3,2,3],zmm0[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm10, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa 192(%r8), %ymm10
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm10[0],mem[0],ymm10[2],mem[2]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm10[2,3,2,3],zmm2[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm16
 ; AVX512DQ-FAST-NEXT:    movb $64, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6 {%k1}
 ; AVX512DQ-FAST-NEXT:    movb $8, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm20 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <u,1,2,3,4,15,u,u>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm20, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm12 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <12,u,u,3,4,5,6,13>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm18, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <u,1,2,3,4,15,u,u>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm12, %zmm14
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,12,u,3,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm12, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm9, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <u,1,2,3,4,5,15,u>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm10, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,12,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm6
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm10, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <13,u,2,3,4,5,6,14>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,1,2,3,4,5,15,u>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm14, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,12,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,13,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm10, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [14,1,2,3,4,5,6,15]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm6, %zmm10
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 1472(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 1472(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 1408(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 1344(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, 1280(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 1216(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 1152(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, 1088(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, 1280(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 1216(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1152(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 960(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 960(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, 896(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, 832(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 832(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 768(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 704(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, 640(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 640(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 576(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, 512(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, 384(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 192(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, (%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 1728(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 1664(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 1600(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 1600(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 1536(%rax)
-; AVX512DQ-FAST-NEXT:    addq $2056, %rsp # imm = 0x808
+; AVX512DQ-FAST-NEXT:    addq $1864, %rsp # imm = 0x748
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
 ; AVX512BW-ONLY-SLOW-LABEL: store_i64_stride7_vf32:
 ; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    subq $2120, %rsp # imm = 0x848
+; AVX512BW-ONLY-SLOW-NEXT:    subq $1928, %rsp # imm = 0x788
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm16
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm5
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $96, %r10b
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [9,1,9,1,9,1,9,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm8
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,9,0,3,4,9,0,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [11,3,11,3,11,3,11,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,10,0,3,2,10,0,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm10, (%rsp) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %ymm12
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [11,3,11,3,11,3,11,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [2,10,0,3,2,10,0,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm8, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm12
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm10[0],ymm1[2],ymm10[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %ymm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[2],ymm8[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $28, %r10b
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[2,3,2,3],zmm2[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,12,0,5,4,12,0,5]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [5,0,14,6,5,0,14,6]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,0,5,4,12,0,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,12,7,0,1,12,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm17
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm22, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,13,6,7,0,13,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [15,7,15,7,15,7,15,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [6,13,14,7,6,13,14,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,13,6,7,0,13,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm17
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm17, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm16, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm12[0],ymm6[0],ymm12[2],ymm6[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k2} = zmm0[2,3,2,3],zmm5[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm10, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm22, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm3, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [6,13,14,7,6,13,14,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm18, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm19, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm13[0],ymm12[0],ymm13[2],ymm12[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k2} = zmm1[2,3,2,3],zmm5[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm14, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm8, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm3, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm17, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm17, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm16, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm19, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm13 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %ymm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm16[0],ymm0[2],ymm16[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k2} = zmm0[2,3,2,3],zmm29[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm20, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm3, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm15, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [13,5,13,5,13,5,13,5]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm24, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %ymm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %ymm17, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm5[0],ymm17[0],ymm5[2],ymm17[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k2} = zmm5[2,3,2,3],zmm29[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm5, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm19, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm2, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm3, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm14, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm12, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm29
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm25, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,0,12,4,3,0,12,4]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm3, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm24, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm4, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm24, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm20, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm26, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm3, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm28, %zmm5, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm4, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm18, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm3, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm3, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm20, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm25, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm30
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm20, %zmm30
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm25, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm28, %zmm1, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm28, %zmm1, %zmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm28
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $48, %r10b
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k3
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [1,0,10,2,1,0,10,2]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm4, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k3} = zmm8[0],zmm27[0],zmm8[2],zmm27[2],zmm8[4],zmm27[4],zmm8[6],zmm27[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm16
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm4, %zmm16
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k3} = zmm9[0],zmm23[0],zmm9[2],zmm23[2],zmm9[4],zmm23[4],zmm9[6],zmm23[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm17
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,8,0,1,0,8,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm5, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [15,7,15,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm10, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm4, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm25 {%k3} = zmm11[0],zmm13[0],zmm11[2],zmm13[2],zmm11[4],zmm13[4],zmm11[6],zmm13[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm5, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm1, %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,6,14]
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm8, %zmm14
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm5, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm10, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm4, %zmm31
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm22 {%k3} = zmm10[0],zmm11[0],zmm10[2],zmm11[2],zmm10[4],zmm11[4],zmm10[6],zmm11[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm5, %zmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm8, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm11
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm6[0],zmm19[0],zmm6[2],zmm19[2],zmm6[4],zmm19[4],zmm6[6],zmm19[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm5, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm10, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm21 {%k3} = zmm22[0],zmm0[0],zmm22[2],zmm0[2],zmm22[4],zmm0[4],zmm22[6],zmm0[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm22, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm22, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm4, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm18, %zmm15, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm18, %zmm15, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm15[0],zmm18[0],zmm15[2],zmm18[2],zmm15[4],zmm18[4],zmm15[6],zmm18[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm5, %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm8, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k3} = zmm11[0],zmm0[0],zmm11[2],zmm0[2],zmm11[4],zmm0[4],zmm11[6],zmm0[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm11, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm0, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [11,3,11,3,11,3,11,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm11, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm11
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -7771,1396 +7834,1399 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm17 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,0,8,0,8,0,8]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm10, %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm8, %zmm8
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $112, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm17 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm11 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm10, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm6 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 128(%rax), %zmm10, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm6 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm5 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm19, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm23 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm8, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm23 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm15 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 128(%rax), %zmm8, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm15 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm5 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm8, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $120, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm7 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = zmm14[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm21 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm21 = zmm12[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $-61, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm3 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm30 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm1 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm26 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $24, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm3 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = zmm12[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm24[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm9 = zmm9[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm25[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $-31, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm9 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm6 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $6, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm15 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm27 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm4 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm16 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm31 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm13 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $56, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm19, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm10 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,11,u,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm21, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,11,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm12, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%r8), %ymm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm12[0],mem[0],ymm12[2],mem[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm12[2,3,2,3],zmm0[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm22 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [9,1,9,1,9,1,9,1]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm8, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm24, %zmm3 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = zmm24[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <0,11,u,u,4,5,6,7>
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm1, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,11,u,4,5,6,7>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm18, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %ymm18
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm18 = ymm18[0],mem[0],ymm18[2],mem[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k2} = zmm18[2,3,2,3],zmm2[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm11 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq (%rsp), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq (%rsp), %ymm12, %ymm18 # 32-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm18 = ymm12[1],mem[1],ymm12[3],mem[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm18[0,2,3,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $14, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm18 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm27 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm25 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm22 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $64, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm10 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm3 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $8, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm28 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm13, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <u,1,2,3,4,15,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm22, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,12,u,3,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm12, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm10, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <u,1,2,3,4,5,15,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm13, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [0,1,12,3,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm14, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,13,2,3,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [14,1,2,3,4,5,6,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm10, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm10, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <u,1,2,3,4,15,u,u>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm11, %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,12,u,3,4,5,6,7>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm12, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <13,u,2,3,4,5,6,14>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm3, %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,5,15,u>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm19, %zmm10, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,12,3,4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,13,2,3,4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm11, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [14,1,2,3,4,5,6,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm11
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 1472(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 1472(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 1408(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 1280(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1216(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 1280(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 1216(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 960(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 832(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 768(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1152(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 960(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 832(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 768(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 704(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 576(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 512(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 384(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 320(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 512(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 384(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 320(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 64(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 64(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 1344(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 1088(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 896(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 640(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 448(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 192(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 896(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 640(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 448(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 192(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 1728(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 1664(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 1600(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 1536(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    addq $2120, %rsp # imm = 0x848
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 1728(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 1664(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 1600(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1536(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    addq $1928, %rsp # imm = 0x788
 ; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512BW-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512BW-ONLY-FAST-LABEL: store_i64_stride7_vf32:
 ; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    subq $2024, %rsp # imm = 0x7E8
+; AVX512BW-ONLY-FAST-NEXT:    subq $1896, %rsp # imm = 0x768
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm20
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm23
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm6
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm10
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm5
 ; AVX512BW-ONLY-FAST-NEXT:    movb $96, %r10b
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [9,1,9,1,9,1,9,1]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,9,0,3,4,9,0,3]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [9,1,9,1,9,1,9,1]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [11,3,11,3,11,3,11,3]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm19
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [2,10,0,3,2,10,0,3]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm3, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm20
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%r9), %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %ymm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %ymm22
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm1[0],ymm19[2],ymm1[2]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%r9), %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm22
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %ymm21
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm8[0],ymm2[0],ymm8[2],ymm2[2]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm18
 ; AVX512BW-ONLY-FAST-NEXT:    movb $28, %r10b
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm3[2,3,2,3],zmm4[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm3[2,3,2,3],zmm5[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,3,7,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm2, %ymm5, %ymm18
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,12,0,5,4,12,0,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm3, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,1,12,7,0,1,12,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm7, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,13,6,7,0,13,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm3, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [6,13,14,7,6,13,14,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm22[0],ymm4[0],ymm22[2],ymm4[2]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm4, %ymm5, %ymm22
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm21[0],ymm1[0],ymm21[2],ymm1[2]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm1, %ymm5, %ymm21
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm19, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[2,3,2,3],zmm30[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,3,7,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm1, %ymm3, %ymm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm19, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,12,0,5,4,12,0,5]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm31
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm4, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,1,12,7,0,1,12,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm5, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm13, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm17, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm25 = [5,0,14,6,5,0,14,6]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,13,6,7,0,13,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm4, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm18, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm7, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [15,7,15,7,15,7,15,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [6,13,14,7,6,13,14,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm23[0],ymm6[0],ymm23[2],ymm6[2]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm6, %ymm3, %ymm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm22[0],ymm2[0],ymm22[2],ymm2[2]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm2, %ymm3, %ymm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm14, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm16, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[2,3,2,3],zmm18[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm2, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm19, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm24
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k2} = zmm4[2,3,2,3],zmm24[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm1, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm7, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm13, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm5, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm17, %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm11, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm5, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm9, %zmm24
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [3,0,12,4,3,0,12,4]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm29, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm19, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm21, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm28
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm29, %zmm28
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm19, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm4, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm21, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm6[2,3,2,3],zmm11[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm1, %zmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm2, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm5, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm14, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm4, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm22
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm10, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm9, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm29, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm28, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [3,0,12,4,3,0,12,4]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm19, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm26, %zmm0, %zmm29
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm26, %zmm19
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm26, %zmm21
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm22, %zmm20
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm22
+; AVX512BW-ONLY-FAST-NEXT:    movb $48, %r10b
+; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k3
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm20
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm6, %zmm20
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,0,10,2,1,0,10,2]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm5, %zmm29
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm5, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm11[0],zmm16[0],zmm11[2],zmm16[2],zmm11[4],zmm16[4],zmm11[6],zmm16[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm28, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    movb $48, %r10b
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm31 = [0,8,0,1,0,8,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm31, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [1,0,10,2,1,0,10,2]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm7[0],zmm17[0],zmm7[2],zmm17[2],zmm7[4],zmm17[4],zmm7[6],zmm17[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm1, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm8, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [15,7,15,7]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm2, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm5, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm31, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm4, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm30[0],zmm15[0],zmm30[2],zmm15[2],zmm30[4],zmm15[4],zmm30[6],zmm15[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm1, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm8, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm6, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm5, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm15, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm24, %zmm0, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm24, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm28, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm19
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm1, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [6,14,6,14]
+; AVX512BW-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm3, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm9, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm6, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm5, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm12[0],zmm23[0],zmm12[2],zmm23[2],zmm12[4],zmm23[4],zmm12[6],zmm23[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm1, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm17
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm3, %zmm17
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm9, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm23
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm8, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm31, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm4, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm26[0],zmm3[0],zmm26[2],zmm3[2],zmm26[4],zmm3[4],zmm26[6],zmm3[6]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm26, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm26, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm12 {%k3} = zmm19[0],zmm0[0],zmm19[2],zmm0[2],zmm19[4],zmm0[4],zmm19[6],zmm0[6]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm19, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm19, %zmm0, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm19, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm19
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm6, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm5, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm25[0],zmm8[0],zmm25[2],zmm8[2],zmm25[4],zmm8[4],zmm25[6],zmm8[6]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm8, %zmm25, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm8, %zmm25, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm9, %zmm25
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm14[0],zmm0[0],zmm14[2],zmm0[2],zmm14[4],zmm0[4],zmm14[6],zmm0[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm14, %zmm6
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm0, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm14, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512BW-ONLY-FAST-NEXT:    movb $12, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm25 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,0,8,0,8,0,8]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm3, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm8, %zmm8
 ; AVX512BW-ONLY-FAST-NEXT:    movb $112, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm25 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm16 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm3, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm16 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm28 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 128(%rax), %zmm3, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm28 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm31 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm22, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm20 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm8
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm16 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm0, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm8, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm16 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm8
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm10 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm0, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 128(%rax), %zmm8, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm8
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm6 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm31
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm31, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $14, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm29 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $120, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm4 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = zmm15[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm17 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm28 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm5 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm29 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $-61, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k5}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm3 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm1 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm27 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $24, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm10[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm0 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = zmm17[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm21[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k5}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $-31, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm30 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm26 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm27 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm23 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm25 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm25 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $6, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm21 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm4 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm18 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm11 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm7 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm5 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $56, %cl
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %ecx, %k4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm22, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm18 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm7 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [9,1,9,1,9,1,9,1]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm31, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm12 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm15 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,11,u,u,4,5,6,7>
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm4 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm6 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm11 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm8 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = zmm23[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,11,u,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm12, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,1,11,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm9, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm9
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm9[0],mem[0],ymm9[2],mem[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k2} = zmm9[2,3,2,3],zmm0[2,3,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm9, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,11,u,4,5,6,7>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm8, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm8
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm8[0],mem[0],ymm8[2],mem[2]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k2} = zmm8[2,3,2,3],zmm2[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $64, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm8 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm3 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $8, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm19 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <12,u,u,3,4,5,6,13>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm6, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,1,2,3,4,15,u,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm19, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,12,u,3,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm9, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <13,u,2,3,4,5,6,14>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm8, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <u,1,2,3,4,5,15,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm6, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,12,3,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,13,2,3,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [14,1,2,3,4,5,6,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm8, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm14 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <12,u,u,3,4,5,6,13>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm12, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <u,1,2,3,4,15,u,u>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm14, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,12,u,3,4,5,6,7>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm8, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <13,u,2,3,4,5,6,14>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm3, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,5,15,u>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm13, %zmm12, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [0,1,12,3,4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm14, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [0,13,2,3,4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm8, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [14,1,2,3,4,5,6,15]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm8
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 1472(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1408(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 1280(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 1216(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 1152(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 1088(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 1472(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 1408(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 1280(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 1216(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 1152(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 1088(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm1, 1024(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 960(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 832(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 768(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 960(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 832(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 768(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 704(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 640(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 640(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 576(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 512(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 320(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 512(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 384(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 192(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 192(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 1344(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 896(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 1344(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 896(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 448(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 1664(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 1600(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 1536(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    addq $2024, %rsp # imm = 0x7E8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, (%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 1728(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 1664(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 1600(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 1536(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    addq $1896, %rsp # imm = 0x768
 ; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512BW-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQBW-SLOW-LABEL: store_i64_stride7_vf32:
 ; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    subq $2120, %rsp # imm = 0x848
+; AVX512DQBW-SLOW-NEXT:    subq $1928, %rsp # imm = 0x788
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm21
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm15
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm10
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm24
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm19
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm20
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm23
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rax), %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rax), %zmm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm7
 ; AVX512DQBW-SLOW-NEXT:    movb $96, %r10b
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [11,3,11,3,11,3,11,3]
 ; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [2,10,0,3,2,10,0,3]
-; AVX512DQBW-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [2,10,0,3,2,10,0,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm8, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm18
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %ymm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm9, (%rsp) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %ymm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%r9), %ymm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%r8), %ymm12
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm9[0],ymm0[2],ymm9[2]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[2],ymm8[2]
 ; AVX512DQBW-SLOW-NEXT:    movb $28, %r10b
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,0,5,4,12,0,5]
-; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,12,0,5,4,12,0,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,12,7,0,1,12,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [5,0,14,6,5,0,14,6]
-; AVX512DQBW-SLOW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [5,0,14,6,5,0,14,6]
+; AVX512DQBW-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm22
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,13,6,7,0,13,6,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [15,7,15,7,15,7,15,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm22
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [6,13,14,7,6,13,14,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm18, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm17, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k2} = zmm3[2,3,2,3],zmm7[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm16, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm18, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm4[2,3,2,3],zmm7[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm16, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm13, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm8, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm4, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm3, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm7
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm24
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm27
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm31, %zmm18, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm31, %zmm16, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm14
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm23, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm7 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm18, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm29
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%r9), %ymm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%r8), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm3[0],ymm11[0],ymm3[2],ymm11[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm3[2,3,2,3],zmm29[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%r8), %ymm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm11[0],ymm4[2],ymm11[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm4[2,3,2,3],zmm29[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm3, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm7, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm4, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm7, %zmm21
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm21
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm16, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [13,5,13,5,13,5,13,5]
-; AVX512DQBW-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm3, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm9, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm25, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm29
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm1
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,0,12,4,3,0,12,4]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm3, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm25, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm25, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm2, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm30, %zmm17, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm4, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm25
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm25
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm21, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm9, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm21, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm23
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm30, %zmm14, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm30, %zmm14, %zmm21
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm30
 ; AVX512DQBW-SLOW-NEXT:    movb $48, %r10b
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k3
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [1,0,10,2,1,0,10,2]
 ; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm4, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm10[0],zmm15[0],zmm10[2],zmm15[2],zmm10[4],zmm15[4],zmm10[6],zmm15[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm4, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm15[0],zmm17[0],zmm15[2],zmm17[2],zmm15[4],zmm17[4],zmm15[6],zmm17[6]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,0,1,0,8,0,1]
-; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm2, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm1, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [15,7,15,7]
-; AVX512DQBW-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm6, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm4, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm8[0],zmm13[0],zmm8[2],zmm13[2],zmm8[4],zmm13[4],zmm8[6],zmm13[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm6, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm19
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,8,0,1,0,8,0,1]
+; AVX512DQBW-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm1, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [6,14,6,14]
+; AVX512DQBW-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm7, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm4, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k3} = zmm10[0],zmm24[0],zmm10[2],zmm24[2],zmm10[4],zmm24[4],zmm10[6],zmm24[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm22
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm10
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm22
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm2, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm7, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm15
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm31, %zmm24, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm31, %zmm24, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k3} = zmm24[0],zmm31[0],zmm24[2],zmm31[2],zmm24[4],zmm31[4],zmm24[6],zmm31[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm31, %zmm2, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm31, %zmm6, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm16 {%k3} = zmm19[0],zmm0[0],zmm19[2],zmm0[2],zmm19[4],zmm0[4],zmm19[6],zmm0[6]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm19, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm19, %zmm0, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm28
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm28
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm31, %zmm27, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm31, %zmm27, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k3} = zmm27[0],zmm31[0],zmm27[2],zmm31[2],zmm27[4],zmm31[4],zmm27[6],zmm31[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm31, %zmm3, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm31, %zmm7, %zmm20
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm19, %zmm31
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm31 {%k3} = zmm15[0],zmm0[0],zmm15[2],zmm0[2],zmm15[4],zmm0[4],zmm15[6],zmm0[6]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm15, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm15, %zmm0, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm31 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm31 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm15, %zmm31
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm12 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movb $120, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm18 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm18 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm25 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm5[0,1,2,3],zmm26[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $-61, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm23 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm22 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm26 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm1 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm22 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    movb $24, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm3 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm3 = zmm5[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm16[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm25[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm21[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    movb $-31, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm21 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm9 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm20 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm20 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQBW-SLOW-NEXT:    movb $12, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm14 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm13 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [0,8,0,8,0,8,0,8]
 ; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm6 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm10 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm24 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm2 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm2
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm2
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm27 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm2
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    movb $112, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm6, %zmm14 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm5 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm5, %zmm10 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm5 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 128(%rax), %zmm5, %zmm24 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm5, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 192(%rax), %zmm1, %zmm2 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm6, %zmm13 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm2, %zmm10 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 128(%rax), %zmm2, %zmm27 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm9
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm9, %zmm2, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 192(%rax), %zmm1, %zmm3 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $6, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm20 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm14 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm15 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm17 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm8 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    movb $56, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm20 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm14 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm15 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm8 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm5, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm9, %zmm2, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [4,9,0,3,4,9,0,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm23 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm7 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm7 = zmm27[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,11,u,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm16, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <0,1,11,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm22, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %ymm16
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm16 = ymm16[0],mem[0],ymm16[2],mem[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k2} = zmm16[2,3,2,3],zmm1[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm19 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq (%rsp), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm11 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm16 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm6 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm6 = zmm28[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,11,u,u,4,5,6,7>
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,1,11,u,4,5,6,7>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm9, %zmm7, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%r8), %ymm7
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm7[0],mem[0],ymm7[2],mem[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k2} = zmm7[2,3,2,3],zmm1[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm15 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq (%rsp), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,2,3,3]
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQBW-SLOW-NEXT:    movb $14, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm16 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm28 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm12 = ymm12[1],mem[1],ymm12[3],mem[3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm21 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,2,3,3]
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm23 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # ymm7 = ymm7[1],mem[1],ymm7[3],mem[3]
 ; AVX512DQBW-SLOW-NEXT:    movb $64, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    movb $8, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,3,3]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm30 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm13, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <u,1,2,3,4,15,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm19, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,12,u,3,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm12, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm7, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,1,2,3,4,5,15,u>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm13, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,1,12,3,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm5, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,13,2,3,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm12, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [14,1,2,3,4,5,6,15]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm7, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm8 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,2,3,3]
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm30 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <12,u,u,3,4,5,6,13>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm11, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <u,1,2,3,4,15,u,u>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm15, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,12,u,3,4,5,6,7>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm9, %zmm7, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <13,u,2,3,4,5,6,14>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm9, %zmm6, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,1,2,3,4,5,15,u>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm9, %zmm11, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,1,12,3,4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,13,2,3,4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm7, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [14,1,2,3,4,5,6,15]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm7
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, 1472(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 1472(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, 1408(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 1344(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, 1280(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 1344(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, 1280(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, 1216(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1152(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, 1152(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, 960(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, 896(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 832(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 768(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, 960(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, 896(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, 832(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 768(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, 704(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, 640(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, 640(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 576(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, 512(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 512(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 448(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, 384(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, 320(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 192(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, 384(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 320(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, 256(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, 192(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 1728(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 1664(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 1600(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 1536(%rax)
-; AVX512DQBW-SLOW-NEXT:    addq $2120, %rsp # imm = 0x848
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, 64(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, (%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 1728(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 1664(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, 1600(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, 1536(%rax)
+; AVX512DQBW-SLOW-NEXT:    addq $1928, %rsp # imm = 0x788
 ; AVX512DQBW-SLOW-NEXT:    vzeroupper
 ; AVX512DQBW-SLOW-NEXT:    retq
 ;
 ; AVX512DQBW-FAST-LABEL: store_i64_stride7_vf32:
 ; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    subq $2056, %rsp # imm = 0x808
+; AVX512DQBW-FAST-NEXT:    subq $1864, %rsp # imm = 0x748
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rax), %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm9
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm19
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm24
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm7
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rax), %zmm13
 ; AVX512DQBW-FAST-NEXT:    movb $96, %r10b
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [9,1,9,1,9,1,9,1]
-; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,9,0,3,4,9,0,3]
-; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm12
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [11,3,11,3,11,3,11,3]
-; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
+; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm0, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm1, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [2,10,0,3,2,10,0,3]
 ; AVX512DQBW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm3, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%r9), %ymm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%r9), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%r8), %ymm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %ymm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %ymm22
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm7[0],ymm1[0],ymm7[2],ymm1[2]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm3, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm20
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa (%r9), %ymm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%r9), %ymm4
+; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%r9), %ymm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %ymm16
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %ymm22
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %ymm21
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm16[0],ymm3[0],ymm16[2],ymm3[2]
 ; AVX512DQBW-FAST-NEXT:    movb $28, %r10b
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm2[2,3,2,3],zmm8[2,3,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,3,7,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm1, %ymm4, %ymm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm1[2,3,2,3],zmm13[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,3,7,7]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm3, %ymm1, %ymm16
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [4,12,0,5,4,12,0,5]
+; AVX512DQBW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm3, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm16
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,1,12,7,0,1,12,7]
+; AVX512DQBW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm3, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [5,0,14,6,5,0,14,6]
+; AVX512DQBW-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm23, %zmm5
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [0,13,6,7,0,13,6,7]
+; AVX512DQBW-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm17, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm28, %zmm2
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [6,13,14,7,6,13,14,7]
+; AVX512DQBW-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm13
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm22[0],ymm4[0],ymm22[2],ymm4[2]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm4, %ymm1, %ymm22
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm21[0],ymm0[0],ymm21[2],ymm0[2]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm0, %ymm1, %ymm21
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm20, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rax), %zmm25
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm2[2,3,2,3],zmm25[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm13
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,1,12,7,0,1,12,7]
-; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm7, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm4
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm25, %zmm15, %zmm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [5,0,14,6,5,0,14,6]
-; AVX512DQBW-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm24, %zmm1
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,13,6,7,0,13,6,7]
-; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm16, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm25, %zmm3, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm23, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm25, %zmm17, %zmm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [15,7,15,7,15,7,15,7]
-; AVX512DQBW-FAST-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm26, %zmm0
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [6,13,14,7,6,13,14,7]
-; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm23[0],ymm5[0],ymm23[2],ymm5[2]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm5, %ymm4, %ymm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm22[0],ymm3[0],ymm22[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm3, %ymm4, %ymm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm19, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rax), %zmm14
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm0[2,3,2,3],zmm14[2,3,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm12, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm13, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm7, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm24, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm26, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm19, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rax), %zmm12
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 {%k2} = zmm8[2,3,2,3],zmm12[2,3,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm0, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm8, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm11, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm3, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm30, %zmm25
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm13
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm18, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm27
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rax), %zmm21
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm5[2,3,2,3],zmm21[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm26, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [3,0,12,4,3,0,12,4]
-; AVX512DQBW-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm15, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm1, %zmm16
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm16
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm5, %zmm23
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm17, %zmm23
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm28, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm14, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm4, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm30, %zmm21
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [3,0,12,4,3,0,12,4]
+; AVX512DQBW-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm30, %zmm3
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm14, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-FAST-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm16, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [15,7,15,7,15,7,15,7]
 ; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm30, %zmm31
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm14, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm9, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm16, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm26, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm23
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm4
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm16, %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm30, %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm4
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm27, %zmm0, %zmm30
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm27, %zmm14
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm27, %zmm16
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm27
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm23, %zmm20
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm23
 ; AVX512DQBW-FAST-NEXT:    movb $48, %r10b
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k3
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,8,0,1,0,8,0,1]
 ; AVX512DQBW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm29
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm11, %zmm29
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm28
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm22
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm11, %zmm22
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [1,0,10,2,1,0,10,2]
 ; AVX512DQBW-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm8, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm10[0],zmm16[0],zmm10[2],zmm16[2],zmm10[4],zmm16[4],zmm10[6],zmm16[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm1, %zmm7
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm4, %zmm24
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [15,7,15,7]
-; AVX512DQBW-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm15, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm1, %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm9, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm26, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm11, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm8, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm22 {%k3} = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm4, %zmm23
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm15, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm18, %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm26, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm13, %zmm5, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm13, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm28
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm26, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm20
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm20
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm8, %zmm20
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm9[0],zmm19[0],zmm9[2],zmm19[2],zmm9[4],zmm19[4],zmm9[6],zmm19[6]
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm4
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm1, %zmm4
+; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [6,14,6,14]
+; AVX512DQBW-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm6
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm5, %zmm6
+; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm28
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm19
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm11, %zmm19
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm17
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm8, %zmm17
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm31 {%k3} = zmm12[0],zmm24[0],zmm12[2],zmm24[2],zmm12[4],zmm24[4],zmm12[6],zmm24[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm7
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm5, %zmm7
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm10, %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm24
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm12
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm4, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm11, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm8, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm15 {%k3} = zmm27[0],zmm6[0],zmm27[2],zmm6[2],zmm27[4],zmm6[4],zmm27[6],zmm6[6]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm27, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm27, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm2, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm16 {%k3} = zmm20[0],zmm0[0],zmm20[2],zmm0[2],zmm20[4],zmm0[4],zmm20[6],zmm0[6]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm20, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm20, %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm20, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm2, %zmm20
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm29
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm29
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm18
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm26
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm11, %zmm26
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm9
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm8, %zmm9
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k3} = zmm15[0],zmm13[0],zmm15[2],zmm13[2],zmm15[4],zmm13[4],zmm15[6],zmm13[6]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm13, %zmm15, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm13, %zmm15, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm10, %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm13 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm13 {%k3} = zmm12[0],zmm0[0],zmm12[2],zmm0[2],zmm12[4],zmm0[4],zmm12[6],zmm0[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, (%rsp) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm11
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm12, %zmm0, %zmm8
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm13
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm12
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $14, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm3 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movb $120, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k4
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k4}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm24, %zmm2 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm2 = zmm24[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm4 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm4 = zmm6[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm22 {%k3}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm31 {%k3}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm15 {%k3}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k3}
 ; AVX512DQBW-FAST-NEXT:    movb $-61, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k5
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k5}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm3 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm7 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, %zmm1 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k5}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm1 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    movb $24, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k3}
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm23[0,1,2,3],zmm30[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k3}
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm1 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm1 = zmm7[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k5}
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,2,3],zmm9[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k5}
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm5[0,1,2,3],zmm16[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k5}
 ; AVX512DQBW-FAST-NEXT:    movb $-31, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm28 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm27 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm27 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm28 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm24 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm24 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm15 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm15 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQBW-FAST-NEXT:    movb $12, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm29 {%k4}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm22 {%k4}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
+; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdx), %xmm3
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
 ; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
@@ -9175,118 +9241,122 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm11 {%k4}
 ; AVX512DQBW-FAST-NEXT:    movb $112, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm6, %zmm29 {%k4}
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm5, %zmm22 {%k4}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm3 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm3, %zmm19 {%k4}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm3 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 128(%rax), %zmm3, %zmm26 {%k4}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r8), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 192(%rax), %zmm0, %zmm11 {%k4}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm3, %zmm2
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 192(%rax), %zmm2, %zmm11 {%k4}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm2
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $6, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm31 {%k4}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm17 {%k4}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm5 {%k4}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k4}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm20 {%k4}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm2
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm17 {%k4}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm2
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm9 {%k4}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm2
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm8 {%k4}
 ; AVX512DQBW-FAST-NEXT:    movb $56, %cl
 ; AVX512DQBW-FAST-NEXT:    kmovd %ecx, %k4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm3, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rax), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm20 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9 {%k4}
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm3, %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rax), %zmm2
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [4,9,0,3,4,9,0,3]
+; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm18 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k1}
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21, %zmm9 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm9 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,11,u,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm16, %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm6 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm6 = zmm29[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,11,u,u,4,5,6,7>
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm7 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm7, %zmm10
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <0,1,11,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm12, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%r8), %ymm12
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm12[0],mem[0],ymm12[2],mem[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm12[2,3,2,3],zmm0[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm10, %zmm13
+; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%r8), %ymm10
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm10[0],mem[0],ymm10[2],mem[2]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm10[2,3,2,3],zmm2[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm16
 ; AVX512DQBW-FAST-NEXT:    movb $64, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movb $8, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm20 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <u,1,2,3,4,15,u,u>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm20, %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm12 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <12,u,u,3,4,5,6,13>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm18, %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <u,1,2,3,4,15,u,u>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm12, %zmm14
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,12,u,3,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm12, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm9, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <u,1,2,3,4,5,15,u>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm10, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,12,3,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm6
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm10, %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <13,u,2,3,4,5,6,14>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,1,2,3,4,5,15,u>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm14, %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,12,3,4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,13,2,3,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm12, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm10, %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [14,1,2,3,4,5,6,15]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm10
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm6, %zmm10
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, 1472(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, 1472(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, 1408(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 1344(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, 1280(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, 1216(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, 1152(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, 1088(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, 1280(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, 1216(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1152(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 960(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, 960(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, 896(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, 832(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, 832(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 768(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 704(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, 640(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, 640(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 576(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, 512(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, 448(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 384(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, 384(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 192(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, (%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, 64(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, (%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, 1728(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, 1664(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 1600(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 1600(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, 1536(%rax)
-; AVX512DQBW-FAST-NEXT:    addq $2056, %rsp # imm = 0x808
+; AVX512DQBW-FAST-NEXT:    addq $1864, %rsp # imm = 0x748
 ; AVX512DQBW-FAST-NEXT:    vzeroupper
 ; AVX512DQBW-FAST-NEXT:    retq
   %in.vec0 = load <32 x i64>, ptr %in.vecptr0, align 64
@@ -12615,48 +12685,49 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-ONLY-SLOW-LABEL: store_i64_stride7_vf64:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $6600, %rsp # imm = 0x19C8
+; AVX512F-ONLY-SLOW-NEXT:    subq $6280, %rsp # imm = 0x1888
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [11,3,11,3,11,3,11,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [2,10,0,3,2,10,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [11,3,11,3,11,3,11,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [2,10,0,3,2,10,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $96, %r10b
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm28, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm3
@@ -12668,710 +12739,718 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $28, %r10b
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,12,7,0,1,12,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,12,0,5,4,12,0,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [15,7,15,7,15,7,15,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm5, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [6,13,14,7,6,13,14,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm26, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [6,13,14,7,6,13,14,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm10, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm9[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm12, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm15, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm13, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm2[2,3,2,3],zmm9[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm12, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm20, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm5, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm26, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm10, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm28, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%r9), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[2],ymm6[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm9[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm14, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm4, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm16, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm5, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%r9), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm6[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm12, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm20, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm14, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm30, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm28, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm19, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm26, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm25
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%r9), %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%r8), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm9[0],ymm1[2],ymm9[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm21[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%r8), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[2],ymm9[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm2[2,3,2,3],zmm25[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm12, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm8, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm20, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm14, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm15, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm19, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm28, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm26, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm15, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm13, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%r9), %ymm6
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%r8), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm2[2,3,2,3],zmm0[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm18, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%r8), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm9, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm23, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm19, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm27, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm20, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm19 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %ymm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%r8), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm6[0],ymm25[0],ymm6[2],ymm25[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k2} = zmm6[2,3,2,3],zmm2[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm19, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm5, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm28, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm30, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%r8), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm30[0],ymm4[2],ymm30[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm4[2,3,2,3],zmm3[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm18, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm10, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm11, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm19, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm31, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm5, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm7, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [13,5,13,5,13,5,13,5]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm26, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm15, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm7, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm27, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm13, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,0,12,4,3,0,12,4]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm5, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm5, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm1, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm1, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm26, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm11, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm13, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm26, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm5, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm13, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm11, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm26, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm5, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm26, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm11, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm13, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm26, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm5, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm3, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm11, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm5, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm29
 ; AVX512F-ONLY-SLOW-NEXT:    movb $48, %r10b
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k3
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [1,0,10,2,1,0,10,2]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k3} = zmm0[0],zmm3[0],zmm0[2],zmm3[2],zmm0[4],zmm3[4],zmm0[6],zmm3[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k3} = zmm21[0],zmm3[0],zmm21[2],zmm3[2],zmm21[4],zmm3[4],zmm21[6],zmm3[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,0,1,0,8,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [6,14,6,14]
+; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [15,7,15,7]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k3} = zmm29[0],zmm0[0],zmm29[2],zmm0[2],zmm29[4],zmm0[4],zmm29[6],zmm0[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
+; AVX512F-ONLY-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k3} = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm2, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm11, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm0[0],zmm3[0],zmm0[2],zmm3[2],zmm0[4],zmm3[4],zmm0[6],zmm3[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm8, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm31, %zmm1, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm27[0],zmm26[0],zmm27[2],zmm26[2],zmm27[4],zmm26[4],zmm27[6],zmm26[6]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm31[0],zmm4[0],zmm31[2],zmm4[2],zmm31[4],zmm4[4],zmm31[6],zmm4[6]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm2, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm8, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm16 {%k3} = zmm21[0],zmm22[0],zmm21[2],zmm22[2],zmm21[4],zmm22[4],zmm21[6],zmm22[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm7, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm24 {%k3} = zmm11[0],zmm22[0],zmm11[2],zmm22[2],zmm11[4],zmm22[4],zmm11[6],zmm22[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm8, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k3} = zmm6[0],zmm0[0],zmm6[2],zmm0[2],zmm6[4],zmm0[4],zmm6[6],zmm0[6]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k3} = zmm12[0],zmm10[0],zmm12[2],zmm10[2],zmm12[4],zmm10[4],zmm12[6],zmm10[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm2, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm14, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm15, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm7, %zmm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k3} = zmm9[0],zmm5[0],zmm9[2],zmm5[2],zmm9[4],zmm5[4],zmm9[6],zmm5[6]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm13
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [11,3,11,3,11,3,11,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm1, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm12, %zmm19, %zmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k3} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm8, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k3} = zmm9[0],zmm0[0],zmm9[2],zmm0[2],zmm9[4],zmm0[4],zmm9[6],zmm0[6]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm9, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm9, %zmm0, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm12 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm19[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,11,u,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,1,11,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm12, %zmm19, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm12, %zmm19, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm19[0],zmm12[0],zmm19[2],zmm12[2],zmm19[4],zmm12[4],zmm19[6],zmm12[6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm13[0],zmm0[0],zmm13[2],zmm0[2],zmm13[4],zmm0[4],zmm13[6],zmm0[6]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm13, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm13, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm8 = zmm3[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,11,u,u,4,5,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,1,11,u,4,5,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm7, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
 ; AVX512F-ONLY-SLOW-NEXT:    movb $4, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm12 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,1,2,10,u,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm12, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <12,u,u,3,4,5,6,13>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm4, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm25 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,10,u,5,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm25, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm12
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,12,u,3,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,0,8,0,8,0,8]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm19 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [0,8,0,8,0,8,0,8]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    movb $24, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm18 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k5}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $6, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 456(%rcx), %ymm12
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = mem[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm1 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,1,2,9,u,u,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm1, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm9 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k5}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $64, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm3 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8 {%k4}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,1,2,3,4,15,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm1, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [9,1,9,1,9,1,9,1]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [4,12,0,5,4,12,0,5]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm6
 ; AVX512F-ONLY-SLOW-NEXT:    movb $12, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm6 = xmm6[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm2 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm2 {%k4}
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 448(%r8), %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <0,1,2,3,4,8,u,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm2, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,2,3,4,8,u,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,5,15,u>
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,9,u,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm12, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,1,2,3,9,u,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm3, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm2, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm8, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm13
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rax), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,10,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,12,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%r9), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%r8), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,10,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm7, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,12,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%r9), %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm14[0],ymm4[2],ymm14[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm4[2,3,2,3],zmm3[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%r8), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,1,12,7,0,1,12,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,13,6,7,0,13,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,8,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm6, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [14,1,2,3,4,5,6,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,9,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,13,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [14,1,2,3,4,5,6,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm10, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,13,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm13
 ; AVX512F-ONLY-SLOW-NEXT:    movb $8, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm10 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k5}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k5}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k5}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k5}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm27 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k5}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k5}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $-31, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm27 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm1 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm6 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm29 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm21 {%k4}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm19, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm4 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm30 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm23 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm19 {%k4}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm2, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    movb $112, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 128(%rax), %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 128(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 192(%rax), %zmm5, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 256(%rax), %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 320(%rax), %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 384(%rax), %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 256(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 320(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $3, 384(%rax), %zmm25, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm19 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm6 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm1 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm1 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm3 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 264(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm4 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 264(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 328(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm26 {%k3}
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 392(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm22 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm4 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 328(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm26 {%k3}
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq 392(%rcx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm21 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $56, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm22 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm21 {%k2}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
@@ -13382,48 +13461,41 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    movb $120, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm15 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm15 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm25 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm10 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm12 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm6 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm31 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm28 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm20
 ; AVX512F-ONLY-SLOW-NEXT:    movb $-61, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm15 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm14 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm25 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm12 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm11 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm12 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = zmm0[0,1,2,3],mem[4,5,6,7]
@@ -13432,136 +13504,142 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm7 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = zmm13[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # zmm8 = zmm15[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $14, %cl
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm13 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm11 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm28 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm10 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm15 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm24 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm2 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm0 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm20 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm0 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm29 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 3008(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 2944(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 2880(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm7, 2816(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 2752(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 2624(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 2560(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 2496(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 3008(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 2944(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, 2880(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm8, 2816(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 2752(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 2688(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 2624(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 2560(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 2496(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 2432(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm6, 2368(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm7, 2368(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 2304(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 2240(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 2240(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 2176(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 2048(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 2112(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 2048(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1984(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm4, 1920(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 1856(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, 1792(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm5, 1920(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1856(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 1792(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1728(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 1664(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 1600(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 1664(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 1600(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 1536(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm3, 1472(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm4, 1472(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 1216(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 1152(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 1088(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm2, 1024(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 1216(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 1152(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 1088(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm3, 1024(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 896(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 768(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 704(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm1, 576(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 768(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 704(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 640(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm2, 576(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 512(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 320(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 256(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 192(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm2, 128(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 3520(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 3520(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3456(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -13575,694 +13653,715 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 3072(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3136(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $6600, %rsp # imm = 0x19C8
+; AVX512F-ONLY-SLOW-NEXT:    addq $6280, %rsp # imm = 0x1888
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: store_i64_stride7_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $6696, %rsp # imm = 0x1A28
+; AVX512F-ONLY-FAST-NEXT:    subq $6184, %rsp # imm = 0x1828
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [11,3,11,3,11,3,11,3]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [2,10,0,3,2,10,0,3]
-; AVX512F-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,3,11,3,11,3,11,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [2,10,0,3,2,10,0,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    movb $96, %r10b
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [4,9,0,3,4,9,0,3]
-; AVX512F-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm27, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm13, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm28
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
 ; AVX512F-ONLY-FAST-NEXT:    movb $28, %r10b
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,12,7,0,1,12,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [4,12,0,5,4,12,0,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm22, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [5,0,14,6,5,0,14,6]
-; AVX512F-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm8, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [15,7,15,7,15,7,15,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [6,13,14,7,6,13,14,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm16, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm10, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm12, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm26[0],ymm11[0],ymm26[2],ymm11[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm4[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm27, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm8, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm16, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [6,13,14,7,6,13,14,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm12, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%r9), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%r8), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm6[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm7, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm13, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm28[0],ymm16[0],ymm28[2],ymm16[2]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm27, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm9, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm18, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm22, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm11, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm0, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm12, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm8, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm15, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm16, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm12, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm12[0],ymm17[0],ymm12[2],ymm17[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm3[2,3,2,3],zmm30[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm27, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm4, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm18, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm7, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm13, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%r9), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%r8), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm5[0],ymm0[0],ymm5[2],ymm0[2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm3[2,3,2,3],zmm24[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm9, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm11, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm8, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm20, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm8, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm16, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm28, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm5, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm7, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rax), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%r9), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm22[0],ymm9[0],ymm22[2],ymm9[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm17, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm8[0],ymm19[0],ymm8[2],ymm19[2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm8, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm2, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm9, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm11, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm27, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm17, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rax), %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%r9), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%r8), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm8[2,3,2,3],zmm27[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm15, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm16, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm29, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm30, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm22, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm11, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm12, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm13, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm17, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rax), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%r9), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%r9), %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%r8), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k2} = zmm10[2,3,2,3],zmm1[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm27, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm11[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm14, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm17, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm24, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm22, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm15, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm16, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm31, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm13, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,7,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm26 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm26, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm25 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm17, %ymm0, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm9, %ymm0, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm4, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm28 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm23 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm19, %ymm0, %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm29, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm5, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm10, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%r9), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%r8), %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm1, %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [3,0,12,4,3,0,12,4]
-; AVX512F-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [3,0,12,4,3,0,12,4]
+; AVX512F-ONLY-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [13,5,13,5,13,5,13,5]
-; AVX512F-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm12, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm30, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm15, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    movb $48, %r10b
-; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k3
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,8,0,1,0,8,0,1]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm10, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,0,10,2,1,0,10,2]
-; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k3} = zmm0[0],zmm23[0],zmm0[2],zmm23[2],zmm0[4],zmm23[4],zmm0[6],zmm23[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm12, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm23, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm29 = [6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # ymm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm29, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm25 = [15,7,15,7]
-; AVX512F-ONLY-FAST-NEXT:    # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm25, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm27, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm30, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm3[0],zmm0[0],zmm3[2],zmm0[2],zmm3[4],zmm0[4],zmm3[6],zmm0[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm23, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm20, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm29, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm27, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm23, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm12, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm20, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm30, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm14, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm15, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm0, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm23, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm20, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm14, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm20, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm14, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm0, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm23, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm14, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm0, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm27, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm23, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm20, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm31, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm2, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm31, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm31, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    movb $48, %r10b
+; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k3
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
+; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm2[0],zmm19[0],zmm2[2],zmm19[2],zmm2[4],zmm19[4],zmm2[6],zmm19[6]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [1,0,10,2,1,0,10,2]
+; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm1, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm13[0],zmm0[0],zmm13[2],zmm0[2],zmm13[4],zmm0[4],zmm13[6],zmm0[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,5,13,5,13,5,13,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [6,14,6,14]
+; AVX512F-ONLY-FAST-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [15,7,15,7]
+; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm13[0],zmm0[0],zmm13[2],zmm0[2],zmm13[4],zmm0[4],zmm13[6],zmm0[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm6, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k3} = zmm2[0],zmm30[0],zmm2[2],zmm30[2],zmm2[4],zmm30[4],zmm2[6],zmm30[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm12, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm7, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm29, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm22, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm25, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm27, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm12, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm10, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm2[0],zmm11[0],zmm2[2],zmm11[2],zmm2[4],zmm11[4],zmm2[6],zmm11[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm6, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k3} = zmm2[0],zmm26[0],zmm2[2],zmm26[2],zmm2[4],zmm26[4],zmm2[6],zmm26[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm12, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm7, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm29, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm22, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm25, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm3, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm27, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm30, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm15, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm6, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm0[0],zmm18[0],zmm0[2],zmm18[2],zmm0[4],zmm18[4],zmm0[6],zmm18[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm7, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm22, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm0[0],zmm13[0],zmm0[2],zmm13[2],zmm0[4],zmm13[4],zmm0[6],zmm13[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm12, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm29, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm25, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm3, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm27, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm12, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm30, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm15, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm0[0],zmm12[0],zmm0[2],zmm12[2],zmm0[4],zmm12[4],zmm0[6],zmm12[6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm10, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm0[0],zmm20[0],zmm0[2],zmm20[2],zmm0[4],zmm20[4],zmm0[6],zmm20[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm12, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm29, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm25, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm0, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm30, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm27, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm12, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm22, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm3, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm5, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm4, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm15, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm12, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [11,3,11,3,11,3,11,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm0, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm22, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm6, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm12, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm29, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm7, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k3} = zmm6[0],zmm5[0],zmm6[2],zmm5[2],zmm6[4],zmm5[4],zmm6[6],zmm5[6]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm25, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k3} = zmm11[0],zmm0[0],zmm11[2],zmm0[2],zmm11[4],zmm0[4],zmm11[6],zmm0[6]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm0, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm16[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r8), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,11,u,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm20, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r9), %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k3} = zmm12[0],zmm14[0],zmm12[2],zmm14[2],zmm12[4],zmm14[4],zmm12[6],zmm14[6]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm12, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm12, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm3, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm10[0],zmm5[0],zmm10[2],zmm5[2],zmm10[4],zmm5[4],zmm10[6],zmm5[6]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm10, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm5, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm5 = zmm2[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r8), %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,11,u,u,4,5,6,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm28, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r9), %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <0,1,11,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm5, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm15
 ; AVX512F-ONLY-FAST-NEXT:    movb $4, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <0,1,2,10,u,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm14, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <12,u,u,3,4,5,6,13>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,12,u,3,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm5, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [0,8,0,8,0,8,0,8]
-; AVX512F-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm17 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm20, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm9 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <0,1,2,10,u,5,6,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <12,u,u,3,4,5,6,13>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm4, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,12,u,3,4,5,6,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [0,8,0,8,0,8,0,8]
+; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    movb $24, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k4}
 ; AVX512F-ONLY-FAST-NEXT:    movb $6, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k5
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 456(%rcx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,2,9,u,u,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm7, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 456(%rcx), %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,2,9,u,u,6,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm1, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k4}
 ; AVX512F-ONLY-FAST-NEXT:    movb $64, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,15,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r8), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r9), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm5 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,1,2,3,4,15,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm0, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r8), %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm0, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r9), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [9,1,9,1,9,1,9,1]
+; AVX512F-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,12,0,5,4,12,0,5]
+; AVX512F-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [5,0,14,6,5,0,14,6]
+; AVX512F-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm0, %zmm28
 ; AVX512F-ONLY-FAST-NEXT:    movb $12, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdx), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,1,2,3,4,8,u,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm2, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,5,15,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,9,u,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm1, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <13,u,2,3,4,5,6,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm2, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm9 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm21 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm18 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 264(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm25 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 328(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm28 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 392(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm23 {%k5}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rax), %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdx), %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm11 = xmm11[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm6 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm6, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,1,2,3,4,8,u,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,1,2,3,4,5,15,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,2,3,9,u,6,7>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <13,u,2,3,4,5,6,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm16 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm17 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm19 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm21 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 264(%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm29 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 328(%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm26 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 392(%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k5}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rax), %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,10,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm16, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm13, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,12,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm19, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rax), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm16 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k2} = zmm3[2,3,2,3],zmm1[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,8,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm5, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,9,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm7, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,13,2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm10, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rax), %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,9,0,3,4,9,0,3]
+; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm1[2,3,2,3],zmm2[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,13,6,7,0,13,6,7]
+; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,8,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm11, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,9,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm0, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,13,2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm5, %zmm10
 ; AVX512F-ONLY-FAST-NEXT:    movb $8, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm15 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k4}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k4}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm9 {%k4}
 ; AVX512F-ONLY-FAST-NEXT:    movb $-31, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -14270,122 +14369,114 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm1 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm1 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm3 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm5 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm10 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm13 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm24 {%k3}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm5 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm30 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm27 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm25 {%k3}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm2, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    movb $112, %cl
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %ecx, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 128(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 128(%rax), %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 256(%rax), %zmm17, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 320(%rax), %zmm26, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 384(%rax), %zmm20, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 256(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 320(%rax), %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm27 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $3, 384(%rax), %zmm4, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm25 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    movb $56, %cl
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %ecx, %k2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm23 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm24 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $14, %cl
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %ecx, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm22 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm21 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm21 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm19 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm19 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm18 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm15 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm15 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm14 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm3 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm27 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm23 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
@@ -14393,42 +14484,51 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $120, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # zmm16 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm20 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm13 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm20 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm11 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $-61, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm4 = zmm4[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # zmm5 = zmm5[0,1,2,3],mem[4,5,6,7]
@@ -14437,82 +14537,72 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm7 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm9 = zmm9[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm10 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = zmm10[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm13 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm13 = zmm12[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm13 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm29[0,1,2,3],zmm30[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm17 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm9 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm9 = zmm22[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm9 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 2944(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 2880(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm12, 2816(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 2752(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 2688(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm12, 2624(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 2560(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 3008(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 2944(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 2880(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm9, 2816(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 2752(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 2688(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm9, 2624(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 2560(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 2496(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 2432(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm12, 2368(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 2304(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 2240(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm8, 2368(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 2304(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 2240(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 2176(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 2112(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 2112(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 2048(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 1984(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm10, 1920(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 1856(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 1984(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm7, 1920(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 1856(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 1792(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1728(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 1664(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 1600(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 1664(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 1600(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 1536(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm9, 1472(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm5, 1472(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 1216(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm7, 1024(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1216(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 1152(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 1088(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm4, 1024(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 896(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 640(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm5, 576(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 768(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 704(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 640(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm2, 576(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 512(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -14520,77 +14610,75 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 256(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm2, 128(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 3520(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3456(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 3520(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 3456(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3392(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3328(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3264(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3200(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 3072(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 3200(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 3072(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3136(%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $6696, %rsp # imm = 0x1A28
+; AVX512F-ONLY-FAST-NEXT:    addq $6184, %rsp # imm = 0x1828
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: store_i64_stride7_vf64:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $6472, %rsp # imm = 0x1948
+; AVX512DQ-SLOW-NEXT:    subq $6216, %rsp # imm = 0x1848
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm18, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm19
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm5
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [11,3,11,3,11,3,11,3]
-; AVX512DQ-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [2,10,0,3,2,10,0,3]
-; AVX512DQ-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm16
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [2,10,0,3,2,10,0,3]
+; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    movb $96, %r10b
 ; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rax), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm17, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm27, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm5
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %ymm3
@@ -14602,761 +14690,778 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
 ; AVX512DQ-SLOW-NEXT:    movb $28, %r10b
 ; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,12,7,0,1,12,7]
-; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,12,0,5,4,12,0,5]
+; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm21
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [5,0,14,6,5,0,14,6]
-; AVX512DQ-SLOW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,13,6,7,0,13,6,7]
-; AVX512DQ-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [15,7,15,7,15,7,15,7]
-; AVX512DQ-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm5, %zmm1
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [6,13,14,7,6,13,14,7]
-; AVX512DQ-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm17, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm27, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm7[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm9, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
+; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
+; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm5, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm17, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm27, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%r9), %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%r8), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[2],ymm6[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm7[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm16, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm9, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm2
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm25, %zmm2
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [6,13,14,7,6,13,14,7]
+; AVX512DQ-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm6
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm5, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm17, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm27, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%r9), %ymm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%r8), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm6[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm9, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm15, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm2[2,3,2,3],zmm9[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm12, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm17, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm25, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm31
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm22
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm5, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%r9), %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%r8), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm6[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm12, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm15
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm14
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm21, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm17, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm6
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm29, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm27, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%r9), %ymm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%r8), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm7, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%r9), %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%r8), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm7[0],ymm2[2],ymm7[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm2[2,3,2,3],zmm1[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[2],ymm9[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm2[2,3,2,3],zmm27[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm12, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm15, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm16, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm17, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm19, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm15, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm30
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm5, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm9, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm27, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm25, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm27
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm7, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%r9), %ymm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%r8), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm2[2,3,2,3],zmm23[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm15, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm30, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm20 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%r9), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%r8), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm4[2,3,2,3],zmm2[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm31, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm16, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r9), %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%r8), %ymm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm6[0],ymm23[0],ymm6[2],ymm23[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 {%k2} = zmm6[2,3,2,3],zmm2[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm20
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm25, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm17, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm19, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm26, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [13,5,13,5,13,5,13,5]
-; AVX512DQ-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm23
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm13, %zmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,0,12,4,3,0,12,4]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm4, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm5, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm9, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm5, %zmm1
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm1, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm29
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm20
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm26, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm9, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm13, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm26, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm5, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm13, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm0, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm9, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm15
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm26, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm5, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm26, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm9, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm26, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm5, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm27, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm20
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm20, %zmm3, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm9, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm20, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm5, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm13
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm29
 ; AVX512DQ-SLOW-NEXT:    movb $48, %r10b
 ; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k3
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [1,0,10,2,1,0,10,2]
 ; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm0[0],zmm4[0],zmm0[2],zmm4[2],zmm0[4],zmm4[4],zmm0[6],zmm4[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k3} = zmm19[0],zmm3[0],zmm19[2],zmm3[2],zmm19[4],zmm3[4],zmm19[6],zmm3[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm6
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,0,1,0,8,0,1]
 ; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm19
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm8
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [6,14,6,14]
+; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
+; AVX512DQ-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm0[0],zmm3[0],zmm0[2],zmm3[2],zmm0[4],zmm3[4],zmm0[6],zmm3[6]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [15,7,15,7]
-; AVX512DQ-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm15 {%k3} = zmm0[0],zmm4[0],zmm0[2],zmm4[2],zmm0[4],zmm4[4],zmm0[6],zmm4[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k3} = zmm22[0],zmm23[0],zmm22[2],zmm23[2],zmm22[4],zmm23[4],zmm22[6],zmm23[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm9, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm13, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm8, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm31, %zmm1, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm26 {%k3} = zmm28[0],zmm21[0],zmm28[2],zmm21[2],zmm28[4],zmm21[4],zmm28[6],zmm21[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm2, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm9, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm14 {%k3} = zmm31[0],zmm22[0],zmm31[2],zmm22[2],zmm31[4],zmm22[4],zmm31[6],zmm22[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm31
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm13, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm8, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm24 {%k3} = zmm11[0],zmm18[0],zmm11[2],zmm18[2],zmm11[4],zmm18[4],zmm11[6],zmm18[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm31
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm17
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm2, %zmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm13, %zmm31
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm15, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm25[0],zmm17[0],zmm25[2],zmm17[2],zmm25[4],zmm17[4],zmm25[6],zmm17[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm13, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm7, %zmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm30
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm30
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k3} = zmm12[0],zmm21[0],zmm12[2],zmm21[2],zmm12[4],zmm21[4],zmm12[6],zmm21[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm27
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm2, %zmm27
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm13, %zmm28
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm15, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm8, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k3} = zmm7[0],zmm10[0],zmm7[2],zmm10[2],zmm7[4],zmm10[4],zmm7[6],zmm10[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm2, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm9, %zmm28
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm13, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm7, %zmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm24
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm24
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k3} = zmm9[0],zmm5[0],zmm9[2],zmm5[2],zmm9[4],zmm5[4],zmm9[6],zmm5[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm22
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm22
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm25
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm8, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm10
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm9
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm9, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k3} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm8, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm6[0],zmm0[0],zmm6[2],zmm0[2],zmm6[4],zmm0[4],zmm6[6],zmm0[6]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm6, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12 {%k1}
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm4[0,1,2,3],zmm19[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm19
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm1, %zmm19
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm12, %zmm18, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm12, %zmm18, %zmm13
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm12, %zmm18, %zmm15
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm18[0],zmm12[0],zmm18[2],zmm12[2],zmm18[4],zmm12[4],zmm18[6],zmm12[6]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm14
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm10[0],zmm0[0],zmm10[2],zmm0[2],zmm10[4],zmm0[4],zmm10[6],zmm0[6]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm10, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm10, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm23 {%k1}
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm8 = zmm3[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,11,u,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,11,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm15
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,1,11,u,4,5,6,7>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm7, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
 ; AVX512DQ-SLOW-NEXT:    movb $4, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k3}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,10,u,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm12, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <12,u,u,3,4,5,6,13>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,12,u,3,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm3, %zmm5
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm19 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm10 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm23, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,12,u,3,4,5,6,7>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm4
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [0,8,0,8,0,8,0,8]
+; AVX512DQ-SLOW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    movb $24, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm18 {%k5}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k5}
 ; AVX512DQ-SLOW-NEXT:    movb $6, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k3
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 456(%rcx), %ymm12
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = mem[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm1 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,1,2,9,u,u,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm6 {%k5}
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm12
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k5}
 ; AVX512DQ-SLOW-NEXT:    movb $64, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,1,2,3,4,15,u,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm14
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm17
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm23
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [4,12,0,5,4,12,0,5]
+; AVX512DQ-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm5
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [5,0,14,6,5,0,14,6]
+; AVX512DQ-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm6
 ; AVX512DQ-SLOW-NEXT:    movb $12, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k4
-; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm4
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm2 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm3
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm2 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 448(%r8), %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,1,2,3,4,8,u,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm2, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,2,3,4,8,u,7>
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,5,15,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm14
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,9,u,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm12, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm0, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm8, %zmm12
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm10
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rax), %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,10,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm7, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,12,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%r9), %ymm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%r8), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm0[0],ymm7[0],ymm0[2],ymm7[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm7[2,3,2,3],zmm5[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm0
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,9,0,3,4,9,0,3]
+; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm9
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,8,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%r9), %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%r8), %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm8[0],ymm4[2],ymm8[2]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [14,1,2,3,4,5,6,15]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,9,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,13,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm6
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,1,12,7,0,1,12,7]
+; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm20
+; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,13,6,7,0,13,6,7]
+; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,8,7]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [14,1,2,3,4,5,6,15]
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,13,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm10
 ; AVX512DQ-SLOW-NEXT:    movb $8, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k5}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k5}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k5}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k5}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k5}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k5}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k5}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26 {%k5}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k5}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k5}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm21 {%k5}
 ; AVX512DQ-SLOW-NEXT:    movb $-31, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm5 {%k4}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm30 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm27 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm25 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm22 {%k4}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm21 {%k4}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm18 {%k4}
 ; AVX512DQ-SLOW-NEXT:    movb $112, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm19, %zmm7 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm1, %zmm2 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 128(%rax), %zmm10, %zmm4 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 192(%rax), %zmm1, %zmm5 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 256(%rax), %zmm1, %zmm30 {%k2}
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm1, %zmm0 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 320(%rax), %zmm1, %zmm25 {%k2}
-; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 384(%rax), %zmm3, %zmm21 {%k2}
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm1, %zmm4 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 128(%rax), %zmm0, %zmm2 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 192(%rax), %zmm0, %zmm3 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 256(%rax), %zmm0, %zmm27 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 320(%rax), %zmm0, %zmm22 {%k2}
+; AVX512DQ-SLOW-NEXT:    vinserti64x2 $3, 384(%rax), %zmm23, %zmm18 {%k2}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm5 {%k3}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k3}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k3}
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k3}
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 264(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k3}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm30 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 328(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm26 {%k3}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastq 392(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm22 {%k3}
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm19 {%k3}
 ; AVX512DQ-SLOW-NEXT:    movb $56, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm22 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm19 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm9 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
 ; AVX512DQ-SLOW-NEXT:    movb $120, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm15 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm15 = zmm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm27 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm16 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm12 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm10 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm6 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm31 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm28 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm20 {%k1}
 ; AVX512DQ-SLOW-NEXT:    movb $-61, %cl
 ; AVX512DQ-SLOW-NEXT:    kmovw %ecx, %k1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm14 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm14 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm11 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm11 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm11 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm3 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm11 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm4 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm4 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm4 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm5 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm5 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm5 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm7 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm7 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm7 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm8 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm8 = zmm13[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm8 {%k1}
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # zmm8 = zmm15[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
@@ -15371,111 +15476,111 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm17 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm15 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm28 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm25 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm2 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm0 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm29 {%k1}
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 3008(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 2944(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 2880(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, 2752(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, 2624(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 2944(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, 2880(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm8, 2816(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 2752(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 2688(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 2624(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 2560(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, 2496(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 2432(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, 2304(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, 2176(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, 2496(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 2432(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm7, 2368(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, 2304(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, 2240(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 2112(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 2048(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 1984(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, 1792(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, 2048(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 1984(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm5, 1920(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, 1856(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, 1792(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1728(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 1664(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 1600(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 1536(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 1600(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 1536(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm4, 1472(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 1216(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, 1152(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, 1088(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 1216(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 1152(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, 1088(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, 1024(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 896(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, 768(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 704(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 640(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 576(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 768(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 704(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, 640(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, 576(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 512(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 256(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 320(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 256(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm1, 128(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 3520(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 3520(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3456(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -15486,959 +15591,961 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3264(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3200(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 3072(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, 3072(%rax)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3136(%rax)
-; AVX512DQ-SLOW-NEXT:    addq $6472, %rsp # imm = 0x1948
+; AVX512DQ-SLOW-NEXT:    addq $6216, %rsp # imm = 0x1848
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: store_i64_stride7_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $6568, %rsp # imm = 0x19A8
+; AVX512DQ-FAST-NEXT:    subq $6120, %rsp # imm = 0x17E8
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm21
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [11,3,11,3,11,3,11,3]
-; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [2,10,0,3,2,10,0,3]
-; AVX512DQ-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, (%rsp) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm17
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [2,10,0,3,2,10,0,3]
+; AVX512DQ-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    movb $96, %r10b
 ; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm16
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
-; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [4,9,0,3,4,9,0,3]
+; AVX512DQ-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm12, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm27, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm14, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %ymm16
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %ymm30
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %ymm28
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
 ; AVX512DQ-FAST-NEXT:    movb $28, %r10b
 ; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm20
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,12,7,0,1,12,7]
-; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm17
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [4,12,0,5,4,12,0,5]
+; AVX512DQ-FAST-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm22, %zmm0
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
 ; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
 ; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [15,7,15,7,15,7,15,7]
-; AVX512DQ-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm8, %zmm1
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [6,13,14,7,6,13,14,7]
-; AVX512DQ-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm7
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm12, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm30[0],ymm9[0],ymm30[2],ymm9[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm4[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm16, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm13, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm17, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm8, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm3
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm18, %zmm2
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [6,13,14,7,6,13,14,7]
+; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm12, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rax), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%r9), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %ymm24
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm24[0],ymm0[0],ymm24[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm5[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm27, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm14, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm28[0],ymm16[0],ymm28[2],ymm16[2]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm16, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm23, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm17, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm22, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm21
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm7, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm8, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm30
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm27, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm12, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm16
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm14, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rax), %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %ymm25
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%r8), %ymm11
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm11[0],ymm25[0],ymm11[2],ymm25[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm3[2,3,2,3],zmm23[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rax), %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%r9), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%r8), %ymm12
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm24
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm4[2,3,2,3],zmm25[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r8), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm13, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm19
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm25, %zmm23, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm20, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm17, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm25, %zmm10, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm11, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm25, %zmm7, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm18, %zmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm8, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm31
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm31, %zmm1, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm13 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rax), %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%r9), %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%r8), %ymm5
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm5[0],ymm9[0],ymm5[2],ymm9[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k2} = zmm4[2,3,2,3],zmm23[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r8), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r9), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm3, %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm26
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm27, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm14, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rax), %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %ymm20
+; AVX512DQ-FAST-NEXT:    vmovdqa 192(%r8), %ymm13
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm13[0],ymm20[0],ymm13[2],ymm20[2]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r8), %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm25
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm19, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm18, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm23, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm22, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm11, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm19
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm14, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm22, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm18, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm17
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rax), %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%r9), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%r8), %ymm2
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm10[2,3,2,3],zmm21[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r8), %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r9), %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm17, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm11
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm3, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rax), %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%r9), %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%r8), %ymm4
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k2} = zmm7[2,3,2,3],zmm27[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r8), %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r9), %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm25, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm23, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm1, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm22, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm10, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm29, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm18, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm19, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm18, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm17, %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm8, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm14, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm12
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm3, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rax), %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa 320(%r9), %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa 320(%r8), %ymm2
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm11[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r8), %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r9), %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm14, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm11
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm25, %zmm27
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm23, %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm22, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm21, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm17, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,7,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm30 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm30, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm28 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm24 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm24, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %ymm25, %ymm0, %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %ymm9, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %ymm4, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %ymm20, %ymm0, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %ymm5, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %ymm10, %ymm0, %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 384(%r9), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa 384(%r8), %ymm2
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpermt2q %ymm1, %ymm0, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [3,0,12,4,3,0,12,4]
-; AVX512DQ-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm29, %zmm1
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
-; AVX512DQ-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm21, %zmm2
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [3,0,12,4,3,0,12,4]
+; AVX512DQ-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm24, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm23, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm24, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm24, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm4, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm0, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm31, %zmm24, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm4, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm0, %zmm31
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm24, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm4, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm0, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm24, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm23, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm6, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm10
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [2,10,0,3,2,10,0,3]
+; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm28
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm24, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm23, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm8, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm2
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm9, %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm9, %zmm2, %zmm24
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm9, %zmm23
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm9, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm9
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm13
 ; AVX512DQ-FAST-NEXT:    movb $48, %r10b
 ; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k3
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,8,0,1,0,8,0,1]
-; AVX512DQ-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
+; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [1,0,10,2,1,0,10,2]
+; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,0,10,2,1,0,10,2]
-; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm8[0],zmm0[0],zmm8[2],zmm0[2],zmm8[4],zmm0[4],zmm8[6],zmm0[6]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,5,13,5,13,5,13,5]
+; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm0[0],zmm26[0],zmm0[2],zmm26[2],zmm0[4],zmm26[4],zmm0[6],zmm26[6]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm30, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm18 = [15,7,15,7]
-; AVX512DQ-FAST-NEXT:    # ymm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm18, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [6,14,6,14]
+; AVX512DQ-FAST-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [15,7,15,7]
+; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm21, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm19
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm8[0],zmm0[0],zmm8[2],zmm0[2],zmm8[4],zmm0[4],zmm8[6],zmm0[6]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm8, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm4[0],zmm2[0],zmm4[2],zmm2[2],zmm4[4],zmm2[4],zmm4[6],zmm2[6]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm18, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm8, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm11, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm2[0],zmm19[0],zmm2[2],zmm19[2],zmm2[4],zmm19[4],zmm2[6],zmm19[6]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm6, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm20
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm11 {%k3} = zmm2[0],zmm30[0],zmm2[2],zmm30[2],zmm2[4],zmm30[4],zmm2[6],zmm30[6]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm7, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm30, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm22, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm3, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm11, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm2[0],zmm16[0],zmm2[2],zmm16[2],zmm2[4],zmm16[4],zmm2[6],zmm16[6]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm21, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm6, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm30
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm14 {%k3} = zmm2[0],zmm26[0],zmm2[2],zmm26[2],zmm2[4],zmm26[4],zmm2[6],zmm26[6]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm7, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm30, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm22, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm18, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm3, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm28, %zmm29, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm8, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm27
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm5[0],zmm0[0],zmm5[2],zmm0[2],zmm5[4],zmm0[4],zmm5[6],zmm0[6]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm31
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm26
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k3} = zmm0[0],zmm12[0],zmm0[2],zmm12[2],zmm0[4],zmm12[4],zmm0[6],zmm12[6]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm31, %zmm11, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm19
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm0[0],zmm31[0],zmm0[2],zmm31[2],zmm0[4],zmm31[4],zmm0[6],zmm31[6]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm31, %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm31, %zmm30, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm31, %zmm18, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm29, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm3, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm8, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm11, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm28
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm7, %zmm28
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm23[0],zmm20[0],zmm23[2],zmm20[2],zmm23[4],zmm20[4],zmm23[6],zmm20[6]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm22, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm3, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm30, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [11,3,11,3,11,3,11,3]
+; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm0, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm22, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm25
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm6, %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm21
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm21
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm12, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm18, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm13
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm24 {%k3} = zmm12[0],zmm15[0],zmm12[2],zmm15[2],zmm12[4],zmm15[4],zmm12[6],zmm15[6]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm12, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm12, %zmm22
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm3, %zmm12
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm11[0],zmm5[0],zmm11[2],zmm5[2],zmm11[4],zmm5[4],zmm11[6],zmm5[6]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm11, %zmm6
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm11, %zmm5, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm2[0,1,2,3],zmm18[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r8), %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,11,u,u,4,5,6,7>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm28, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r9), %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <0,1,11,u,4,5,6,7>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm18
+; AVX512DQ-FAST-NEXT:    movb $4, %sil
+; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm10 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <0,1,2,10,u,5,6,7>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm16
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm0, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm29, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm8, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm4
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm4, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm1, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm8, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <12,u,u,3,4,5,6,13>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm4, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,12,u,3,4,5,6,7>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm14
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [0,8,0,8,0,8,0,8]
+; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm7, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm6[0],zmm5[0],zmm6[2],zmm5[2],zmm6[4],zmm5[4],zmm6[6],zmm5[6]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm30
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm6
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k3} = zmm12[0],zmm4[0],zmm12[2],zmm4[2],zmm12[4],zmm4[4],zmm12[6],zmm4[6]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm12, %zmm11
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm12, %zmm4, %zmm7
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm18, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm13 {%k1}
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r8), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,11,u,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm9, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r9), %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,1,11,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm4, %zmm14
-; AVX512DQ-FAST-NEXT:    movb $4, %sil
-; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm13 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,1,2,10,u,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm13, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <12,u,u,3,4,5,6,13>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <0,12,u,3,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm10, %zmm18
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm23 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm27
 ; AVX512DQ-FAST-NEXT:    movb $24, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm12 {%k4}
 ; AVX512DQ-FAST-NEXT:    movb $6, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k5
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 456(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k5}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,2,9,u,u,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm7, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm12 {%k4}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 456(%rcx), %ymm9
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k5}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,2,9,u,u,6,7>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm1, %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11 {%k4}
 ; AVX512DQ-FAST-NEXT:    movb $64, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,15,u,u>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r8), %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r9), %zmm2
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm5, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm5, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm5, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm5 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,1,2,3,4,15,u,u>
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm0, %zmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r8), %zmm10
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm0, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r9), %zmm0
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm10, %zmm4
+; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [9,1,9,1,9,1,9,1]
+; AVX512DQ-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm10, %zmm15
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,12,0,5,4,12,0,5]
+; AVX512DQ-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm10, %zmm8
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [5,0,14,6,5,0,14,6]
+; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm10, %zmm0, %zmm2
 ; AVX512DQ-FAST-NEXT:    movb $12, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k3
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdx), %xmm5
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm11 {%k3}
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm11, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,4,8,u,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm5, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <u,1,2,3,4,5,15,u>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,1,2,3,9,u,6,7>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm1, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <13,u,2,3,4,5,6,14>
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm5, %zmm12
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm9 {%k5}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm31 {%k5}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm8 {%k5}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k5}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 264(%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm20
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm20 {%k5}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 328(%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm28 {%k5}
-; AVX512DQ-FAST-NEXT:    vpbroadcastq 392(%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k5}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rax), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,10,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,12,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm18, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rax), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm16 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k2} = zmm17[2,3,2,3],zmm1[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm16, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdx), %xmm10
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm6 {%k3}
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm6, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,1,2,3,4,8,u,7>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,1,2,3,4,5,15,u>
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <0,1,2,3,9,u,6,7>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <13,u,2,3,4,5,6,14>
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm9
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm11
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm6 {%k5}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm17
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm17 {%k5}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k5}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k5}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 264(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm27 {%k5}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 328(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm26 {%k5}
+; AVX512DQ-FAST-NEXT:    vpbroadcastq 392(%rcx), %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm21 {%k5}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rax), %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,10,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm16, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,12,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm14, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rax), %zmm1
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
+; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[2,3,2,3],zmm1[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,12,7,0,1,12,7]
+; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
+; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,8,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm7, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [14,1,2,3,4,5,6,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,13,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm13, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,13,2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm28
+; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm5, %zmm11
 ; AVX512DQ-FAST-NEXT:    movb $8, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm18 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k4}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm26 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k4}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm9 {%k4}
 ; AVX512DQ-FAST-NEXT:    movb $-31, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm13 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm1 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm1 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm5 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm11 {%k3}
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm25 {%k3}
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm2 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm5 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm29 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm31 {%k3}
+; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm25 {%k3}
 ; AVX512DQ-FAST-NEXT:    movb $112, %cl
 ; AVX512DQ-FAST-NEXT:    kmovw %ecx, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm0, %zmm13 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm0, %zmm1 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm3, %zmm0 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm3, %zmm1 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 128(%rax), %zmm0, %zmm2 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 128(%rax), %zmm3, %zmm2 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 192(%rax), %zmm3, %zmm5 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 192(%rax), %zmm0, %zmm5 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 256(%rax), %zmm23, %zmm7 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 320(%rax), %zmm27, %zmm11 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm27
-; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 384(%rax), %zmm10, %zmm25 {%k2}
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 256(%rax), %zmm0, %zmm29 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 320(%rax), %zmm0, %zmm31 {%k2}
+; AVX512DQ-FAST-NEXT:    vinserti64x2 $3, 384(%rax), %zmm4, %zmm25 {%k2}
 ; AVX512DQ-FAST-NEXT:    movb $56, %cl
 ; AVX512DQ-FAST-NEXT:    kmovw %ecx, %k2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm20 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm30 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm24 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm21 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    movb $14, %cl
 ; AVX512DQ-FAST-NEXT:    kmovw %ecx, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm23 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm20 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm22 {%k2}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm19 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k2}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm18 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm16 {%k2}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm15 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm14 {%k2}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm6 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm4 {%k2}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm2 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm29 {%k2}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm24 {%k2}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm7 {%k1}
 ; AVX512DQ-FAST-NEXT:    movb $120, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm13 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm3 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm16 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm18 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13 {%k1}
 ; AVX512DQ-FAST-NEXT:    movb $-61, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm5 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm7 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm7 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm7 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm8 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm9 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm9 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm10 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10 {%k1}
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm11 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm11 = zmm30[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm4 = zmm4[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm5 = zmm5[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm5 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm9 = zmm9[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k1}
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm10 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm10 = zmm22[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm10 {%k1}
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 3008(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 2944(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, 2880(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 2816(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 2752(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 3008(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 2944(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 2880(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm10, 2816(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 2752(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, 2688(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 2624(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 2560(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm10, 2624(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 2560(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 2496(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 2432(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 2368(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, 2304(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, 2240(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 2432(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm9, 2368(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, 2304(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 2240(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 2176(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 2112(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 2112(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 2048(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 1984(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 1920(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, 1856(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 1984(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm8, 1920(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, 1856(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, 1792(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1728(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 1664(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 1600(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 1536(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 1472(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 1664(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 1600(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, 1536(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm7, 1472(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, 1408(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 1216(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 1152(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 1088(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 1024(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 1216(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 1152(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 1088(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm5, 1024(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 896(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, 704(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, 640(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 512(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 768(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 640(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm4, 576(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 512(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, 256(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, 128(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 3520(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3456(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 3520(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, 3456(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3392(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -16447,57 +16554,58 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3264(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3200(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 3072(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 3072(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3136(%rax)
-; AVX512DQ-FAST-NEXT:    addq $6568, %rsp # imm = 0x19A8
+; AVX512DQ-FAST-NEXT:    addq $6120, %rsp # imm = 0x17E8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
 ; AVX512BW-ONLY-SLOW-LABEL: store_i64_stride7_vf64:
 ; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    subq $6600, %rsp # imm = 0x19C8
+; AVX512BW-ONLY-SLOW-NEXT:    subq $6280, %rsp # imm = 0x1888
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm5
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [11,3,11,3,11,3,11,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [2,10,0,3,2,10,0,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm16
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [11,3,11,3,11,3,11,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [2,10,0,3,2,10,0,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $96, %r10b
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm28, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm5
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm3
@@ -16509,710 +16617,718 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $28, %r10b
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,12,7,0,1,12,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,12,0,5,4,12,0,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm14
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [15,7,15,7,15,7,15,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm5, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [6,13,14,7,6,13,14,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm26, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [6,13,14,7,6,13,14,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm10, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm9[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm12, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm15, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm13, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm2[2,3,2,3],zmm9[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm12, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm20, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm5, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm26, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm9
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm10, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm28, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%r9), %ymm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[2],ymm6[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm9[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm14, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm4, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm16, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm5, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm31
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%r9), %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm6[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm12, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm20, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm14, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm30, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm28, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm19, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm26, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm16
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm25
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%r9), %ymm9
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%r8), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm9[0],ymm1[2],ymm9[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm21[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%r8), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[2],ymm9[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm2[2,3,2,3],zmm25[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm12, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm8, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm20, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm14, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm15, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm19, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm28, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm26, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm27, %zmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm15, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm13, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%r9), %ymm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%r8), %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm2[2,3,2,3],zmm0[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm18, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%r8), %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm9, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm23, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm19, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm27, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm20, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm19 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %ymm25
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%r8), %ymm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm6[0],ymm25[0],ymm6[2],ymm25[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k2} = zmm6[2,3,2,3],zmm2[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm19, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm5, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm28, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %ymm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %ymm30, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%r8), %ymm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm30[0],ymm4[2],ymm30[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm4[2,3,2,3],zmm3[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm18, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm10, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm11, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm19, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm31, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm5, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm7, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [13,5,13,5,13,5,13,5]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm26, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm15, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm7, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm27, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm13, %zmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,0,12,4,3,0,12,4]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm5, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm3, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm5, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm1, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm1, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm29
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm26, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm11, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm13, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm8
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm3, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm26, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm5, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm13, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm0, %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm17
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm11, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm26, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm3, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm5, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm26, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm11, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm13, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm26, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm5, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm3, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm11, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm20, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm5, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm29
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $48, %r10b
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k3
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [1,0,10,2,1,0,10,2]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k3} = zmm0[0],zmm3[0],zmm0[2],zmm3[2],zmm0[4],zmm3[4],zmm0[6],zmm3[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k3} = zmm21[0],zmm3[0],zmm21[2],zmm3[2],zmm21[4],zmm3[4],zmm21[6],zmm3[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,0,1,0,8,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [6,14,6,14]
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [15,7,15,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k3} = zmm29[0],zmm0[0],zmm29[2],zmm0[2],zmm29[4],zmm0[4],zmm29[6],zmm0[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm30 {%k3} = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm2, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm11, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm0[0],zmm3[0],zmm0[2],zmm3[2],zmm0[4],zmm3[4],zmm0[6],zmm3[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm8, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm16
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm31, %zmm1, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm27[0],zmm26[0],zmm27[2],zmm26[2],zmm27[4],zmm26[4],zmm27[6],zmm26[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm31[0],zmm4[0],zmm31[2],zmm4[2],zmm31[4],zmm4[4],zmm31[6],zmm4[6]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm2, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm31
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm8, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm16 {%k3} = zmm21[0],zmm22[0],zmm21[2],zmm22[2],zmm21[4],zmm22[4],zmm21[6],zmm22[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm7, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm24 {%k3} = zmm11[0],zmm22[0],zmm11[2],zmm22[2],zmm11[4],zmm22[4],zmm11[6],zmm22[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm31
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm31
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm8, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k3} = zmm6[0],zmm0[0],zmm6[2],zmm0[2],zmm6[4],zmm0[4],zmm6[6],zmm0[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k3} = zmm12[0],zmm10[0],zmm12[2],zmm10[2],zmm12[4],zmm10[4],zmm12[6],zmm10[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm30
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm2, %zmm30
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm14, %zmm28
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm15, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm7, %zmm27
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k3} = zmm9[0],zmm5[0],zmm9[2],zmm5[2],zmm9[4],zmm5[4],zmm9[6],zmm5[6]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm13
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [11,3,11,3,11,3,11,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm1, %zmm21
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm12, %zmm19, %zmm5
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k3} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm8, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k3} = zmm9[0],zmm0[0],zmm9[2],zmm0[2],zmm9[4],zmm0[4],zmm9[6],zmm0[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm9, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm9, %zmm0, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm12 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm19[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,11,u,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,1,11,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm12, %zmm19, %zmm14
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm12, %zmm19, %zmm15
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm19[0],zmm12[0],zmm19[2],zmm12[2],zmm19[4],zmm12[4],zmm19[6],zmm12[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm13[0],zmm0[0],zmm13[2],zmm0[2],zmm13[4],zmm0[4],zmm13[6],zmm0[6]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm13, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm13, %zmm0, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm8 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm8 = zmm3[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,11,u,u,4,5,6,7>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,1,11,u,4,5,6,7>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm7, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $4, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm12 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,1,2,10,u,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm12, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <12,u,u,3,4,5,6,13>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm4, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm25 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,10,u,5,6,7>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm25, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm12
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,12,u,3,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,0,8,0,8,0,8]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm19 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [0,8,0,8,0,8,0,8]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $24, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm18 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k5}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $6, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 456(%rcx), %ymm12
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = mem[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm1 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,1,2,9,u,u,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm1, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm9 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k5}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $64, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm3 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,1,2,3,4,15,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm18
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm1, %zmm16
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [9,1,9,1,9,1,9,1]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [4,12,0,5,4,12,0,5]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $12, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm6 = xmm6[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm2 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm2 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 448(%r8), %zmm2, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <0,1,2,3,4,8,u,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm2, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,2,3,4,8,u,7>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,5,15,u>
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,9,u,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm12, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,1,2,3,9,u,6,7>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm10
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm3, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm2, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm8, %zmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm13
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rax), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,10,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm8, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,12,3,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%r9), %ymm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%r8), %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,10,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm7, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,12,3,4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%r9), %ymm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm14[0],ymm4[2],ymm14[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm4[2,3,2,3],zmm3[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%r8), %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,1,12,7,0,1,12,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,13,6,7,0,13,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,8,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm6, %zmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [14,1,2,3,4,5,6,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,9,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,13,2,3,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [14,1,2,3,4,5,6,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm10, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,13,2,3,4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm13
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $8, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm10 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm27 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k5}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $-31, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm27 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm1 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm6 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm29 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm21 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm19, %zmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm4 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm30 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm23 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm19 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, (%rax), %zmm2, %zmm2
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $112, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm1, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 64(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 128(%rax), %zmm1, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 128(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 192(%rax), %zmm5, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 256(%rax), %zmm1, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 320(%rax), %zmm1, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 384(%rax), %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 256(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 320(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $3, 384(%rax), %zmm25, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm19 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm6 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm1 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm1 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm3 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 264(%rcx), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm4 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 264(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 328(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm26 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 392(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm22 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm4 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 328(%rcx), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm26 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastq 392(%rcx), %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = mem[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm21 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $56, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm22 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm21 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
@@ -17223,48 +17339,41 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $120, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm15 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm15 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm25 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm10 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm12 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm6 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm31 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm28 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm5 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm20
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $-61, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm15 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm14 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm25 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm12 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm11 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm12 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = zmm0[0,1,2,3],mem[4,5,6,7]
@@ -17273,136 +17382,142 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm7 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = zmm13[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm5 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm8 # 64-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # zmm8 = zmm15[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $14, %cl
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm13 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm11 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm28 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm10 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm15 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm24 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,3,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm20 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm0 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm29 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 3008(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 2944(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 2880(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm7, 2816(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 2752(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 2624(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 2560(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 2496(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 3008(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 2944(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, 2880(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm8, 2816(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 2752(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 2688(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 2624(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 2560(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 2496(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 2432(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm6, 2368(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm7, 2368(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 2304(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 2240(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 2240(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 2176(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 2048(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 2112(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 2048(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1984(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm4, 1920(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 1856(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, 1792(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm5, 1920(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1856(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 1792(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1728(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 1664(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 1600(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 1664(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 1600(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 1536(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm3, 1472(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm4, 1472(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 1216(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 1152(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 1088(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm2, 1024(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 1216(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 1152(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 1088(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm3, 1024(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 896(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 768(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 704(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 640(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm1, 576(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 768(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 704(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 640(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm2, 576(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 512(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 320(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 256(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 192(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 320(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 256(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 192(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm2, 128(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 3520(%rax)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 3520(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3456(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -17416,694 +17531,715 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 3072(%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3136(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    addq $6600, %rsp # imm = 0x19C8
+; AVX512BW-ONLY-SLOW-NEXT:    addq $6280, %rsp # imm = 0x1888
 ; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512BW-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512BW-ONLY-FAST-LABEL: store_i64_stride7_vf64:
 ; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    subq $6696, %rsp # imm = 0x1A28
+; AVX512BW-ONLY-FAST-NEXT:    subq $6184, %rsp # imm = 0x1828
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm21
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm6
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [11,3,11,3,11,3,11,3]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [2,10,0,3,2,10,0,3]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm17
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,3,11,3,11,3,11,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [2,10,0,3,2,10,0,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $96, %r10b
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm8
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [4,9,0,3,4,9,0,3]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm27, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm13, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %ymm16
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm6
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm26
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %ymm28
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $28, %r10b
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,12,7,0,1,12,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [4,12,0,5,4,12,0,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm22, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [5,0,14,6,5,0,14,6]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm8, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm20
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [15,7,15,7,15,7,15,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [6,13,14,7,6,13,14,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm16, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm10, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm12, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm26[0],ymm11[0],ymm26[2],ymm11[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm4[2,3,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm27, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm8, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm16, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [6,13,14,7,6,13,14,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm12, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%r9), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%r8), %ymm4
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm25
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm6[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm7, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm13, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm28[0],ymm16[0],ymm28[2],ymm16[2]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm27, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm21
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm9, %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm18, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm22, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm20
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm11, %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm0, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm12, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm8, %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm16, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm12, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %ymm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm12
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm12[0],ymm17[0],ymm12[2],ymm17[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm3[2,3,2,3],zmm30[2,3,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm27, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm4, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm18, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm7, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm25
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm13, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm17
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm24
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%r9), %ymm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%r8), %ymm5
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm5[0],ymm0[0],ymm5[2],ymm0[2]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm3[2,3,2,3],zmm24[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm9, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm11, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm8, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm20, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm8, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm16, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm28, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm5, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm24
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm7, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rax), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%r9), %ymm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %ymm22
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm22[0],ymm9[0],ymm22[2],ymm9[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm17, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %ymm19
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm8
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm8[0],ymm19[0],ymm8[2],ymm19[2]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm29
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm24
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm2, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm9, %zmm5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm8, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm11, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm31
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm8
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm27, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm6
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm17, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rax), %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%r9), %ymm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%r8), %ymm4
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm8[2,3,2,3],zmm27[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm15, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm16, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm29, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm30, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm22, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm11, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm12, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm13, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm17, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rax), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%r9), %ymm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%r9), %ymm10
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%r8), %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k2} = zmm10[2,3,2,3],zmm1[2,3,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm27, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm3
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm11[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm8, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm14, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm17, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm24, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm22, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm15, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm16, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm31, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm13, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,7,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm26 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm26, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm25 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm17, %ymm0, %ymm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm9, %ymm0, %ymm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm4, %ymm0, %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm28 # 32-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm23 # 32-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm19, %ymm0, %ymm29
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %ymm29, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm5, %ymm0, %ymm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm10, %ymm0, %ymm3
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%r9), %ymm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%r8), %ymm2
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm1, %ymm0, %ymm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [3,0,12,4,3,0,12,4]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [13,5,13,5,13,5,13,5]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm12, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm30, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm15, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    movb $48, %r10b
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,8,0,1,0,8,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [3,0,12,4,3,0,12,4]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm10, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,0,10,2,1,0,10,2]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k3} = zmm0[0],zmm23[0],zmm0[2],zmm23[2],zmm0[4],zmm23[4],zmm0[6],zmm23[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm12, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm23, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm29 = [6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm29, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm25 = [15,7,15,7]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm25, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm27, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm30, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm3[0],zmm0[0],zmm3[2],zmm0[2],zmm3[4],zmm0[4],zmm3[6],zmm0[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm23, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm20, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm29, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm27, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm23, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm12, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm20, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm30, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm14, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm15, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm0, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm23, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm20, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm14, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm24
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm20, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm14, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm0, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm23, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm14, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm0, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm6
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm27, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm28
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm23, %zmm28
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm20, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm31
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm31, %zmm6
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm31, %zmm2, %zmm23
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm31, %zmm20
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm31, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm31
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    movb $48, %r10b
+; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k3
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm2[0],zmm19[0],zmm2[2],zmm19[2],zmm2[4],zmm19[4],zmm2[6],zmm19[6]
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [1,0,10,2,1,0,10,2]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm1, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm13[0],zmm0[0],zmm13[2],zmm0[2],zmm13[4],zmm0[4],zmm13[6],zmm0[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,5,13,5,13,5,13,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [6,14,6,14]
+; AVX512BW-ONLY-FAST-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [15,7,15,7]
+; AVX512BW-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm1, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm13[0],zmm0[0],zmm13[2],zmm0[2],zmm13[4],zmm0[4],zmm13[6],zmm0[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm6, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm19
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm19
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k3} = zmm2[0],zmm30[0],zmm2[2],zmm30[2],zmm2[4],zmm30[4],zmm2[6],zmm30[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm12, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm7, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm29, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm22, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm25, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm27, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm10, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm2[0],zmm11[0],zmm2[2],zmm11[2],zmm2[4],zmm11[4],zmm2[6],zmm11[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm6, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k3} = zmm2[0],zmm26[0],zmm2[2],zmm26[2],zmm2[4],zmm26[4],zmm2[6],zmm26[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm12, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm7, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm29, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm22, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm25, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm3, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm27, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm30, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm15, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm6, %zmm30
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm29
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm29
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm0[0],zmm18[0],zmm0[2],zmm18[2],zmm0[4],zmm18[4],zmm0[6],zmm18[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm7, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm22, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm0[0],zmm13[0],zmm0[2],zmm13[2],zmm0[4],zmm13[4],zmm0[6],zmm13[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm29, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm25, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm3, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm31, %zmm27, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm12, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm30, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm15, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm27
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm0[0],zmm12[0],zmm0[2],zmm12[2],zmm0[4],zmm12[4],zmm0[6],zmm12[6]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm10, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm0[0],zmm20[0],zmm0[2],zmm20[2],zmm0[4],zmm20[4],zmm0[6],zmm20[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm29, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm25, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm0, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm30, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm27, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm12, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm4, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm22, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm3, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm5, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm4, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm15, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm12, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [11,3,11,3,11,3,11,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm0, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm22, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm25
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm6, %zmm25
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm24
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm24
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm12, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm29, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm7, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k3} = zmm6[0],zmm5[0],zmm6[2],zmm5[2],zmm6[4],zmm5[4],zmm6[6],zmm5[6]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm25, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k3} = zmm11[0],zmm0[0],zmm11[2],zmm0[2],zmm11[4],zmm0[4],zmm11[6],zmm0[6]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm0, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm16[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r8), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,11,u,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm20, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r9), %zmm25
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm23 {%k3} = zmm12[0],zmm14[0],zmm12[2],zmm14[2],zmm12[4],zmm14[4],zmm12[6],zmm14[6]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm12, %zmm7
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm12, %zmm22
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm3, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm10[0],zmm5[0],zmm10[2],zmm5[2],zmm10[4],zmm5[4],zmm10[6],zmm5[6]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm10, %zmm6
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm10, %zmm5, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = zmm2[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r8), %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,11,u,u,4,5,6,7>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm28, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r9), %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <0,1,11,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm5, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm15
 ; AVX512BW-ONLY-FAST-NEXT:    movb $4, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <0,1,2,10,u,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm14, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm3 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <12,u,u,3,4,5,6,13>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,12,u,3,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm5, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [0,8,0,8,0,8,0,8]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm17 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm20, %zmm26
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm9 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <0,1,2,10,u,5,6,7>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <12,u,u,3,4,5,6,13>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm4, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,12,u,3,4,5,6,7>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [0,8,0,8,0,8,0,8]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    movb $24, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $6, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k5
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 456(%rcx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,2,9,u,u,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm7, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 456(%rcx), %ymm9
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,2,9,u,u,6,7>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm1, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $64, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,1,2,3,4,15,u,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r8), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r9), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm13
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm5 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,1,2,3,4,15,u,u>
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm0, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r8), %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm0, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r9), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [9,1,9,1,9,1,9,1]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,12,0,5,4,12,0,5]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm11, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [5,0,14,6,5,0,14,6]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm0, %zmm28
 ; AVX512BW-ONLY-FAST-NEXT:    movb $12, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 448(%rdx), %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,1,2,3,4,8,u,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm2, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,5,15,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,9,u,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm1, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <13,u,2,3,4,5,6,14>
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm2, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm9 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm21 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm18 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 264(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm25 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 328(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm28 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 392(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm23 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rax), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 448(%rdx), %xmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm11 = xmm11[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm6 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm6, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,1,2,3,4,8,u,7>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,1,2,3,4,5,15,u>
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,2,3,9,u,6,7>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <13,u,2,3,4,5,6,14>
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm16 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm17 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm19 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm21 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 264(%rcx), %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm29 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 328(%rcx), %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm26 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 392(%rcx), %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rax), %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,10,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm16, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm13, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,12,3,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm19, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rax), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm16 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k2} = zmm3[2,3,2,3],zmm1[2,3,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,8,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm5, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,9,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm7, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,13,2,3,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm10, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rax), %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,9,0,3,4,9,0,3]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm1[2,3,2,3],zmm2[2,3,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm8
+; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,13,6,7,0,13,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm28
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,8,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm11, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm12
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,9,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm0, %zmm31
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,13,2,3,4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm28
+; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm5, %zmm10
 ; AVX512BW-ONLY-FAST-NEXT:    movb $8, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm15 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm9 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $-31, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -18111,122 +18247,114 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm1 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm1 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm3 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm5 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm10 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm13 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm24 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm0, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm5 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdx), %xmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm30 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%rdx), %xmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm27 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%rdx), %xmm2
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm25 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, (%rax), %zmm2, %zmm2
 ; AVX512BW-ONLY-FAST-NEXT:    movb $112, %cl
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 64(%rax), %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 128(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 128(%rax), %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 192(%rax), %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 256(%rax), %zmm17, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 320(%rax), %zmm26, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 384(%rax), %zmm20, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 256(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 320(%rax), %zmm0, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm27 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $3, 384(%rax), %zmm4, %zmm2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm25 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $56, %cl
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %ecx, %k2
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm23 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm24 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $14, %cl
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm22 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm21 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm21 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm19 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm19 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm18 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm15 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm15 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm14 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm3 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm3 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm27 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm23 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
@@ -18234,42 +18362,51 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $120, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 64-byte Folded Reload
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm16 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm20 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm13 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm20 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm11 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $-61, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = zmm4[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = zmm5[0,1,2,3],mem[4,5,6,7]
@@ -18278,82 +18415,72 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm7 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = zmm9[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm10 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = zmm10[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm13 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = zmm12[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm13 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm29[0,1,2,3],zmm30[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm17 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm9 # 64-byte Folded Reload
+; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = zmm22[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm9 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 2944(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 2880(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm12, 2816(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 2752(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 2688(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm12, 2624(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 2560(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 3008(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 2944(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 2880(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm9, 2816(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 2752(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 2688(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm9, 2624(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 2560(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 2496(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 2432(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm12, 2368(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 2304(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 2240(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm8, 2368(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 2304(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 2240(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 2176(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 2112(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 2112(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 2048(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 1984(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm10, 1920(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 1856(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 1984(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm7, 1920(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 1856(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 1792(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1728(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 1664(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 1600(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 1664(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 1600(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 1536(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm9, 1472(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm5, 1472(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 1216(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm7, 1024(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1216(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 1152(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 1088(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm4, 1024(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 896(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 640(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm5, 576(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 768(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 704(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 640(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm2, 576(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 512(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -18361,77 +18488,75 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 320(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 256(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 192(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 256(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm2, 128(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 3520(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3456(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 3520(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 3456(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3392(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3328(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3264(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3200(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 3072(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 3200(%rax)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 3072(%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3136(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    addq $6696, %rsp # imm = 0x1A28
+; AVX512BW-ONLY-FAST-NEXT:    addq $6184, %rsp # imm = 0x1828
 ; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512BW-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQBW-SLOW-LABEL: store_i64_stride7_vf64:
 ; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    subq $6472, %rsp # imm = 0x1948
+; AVX512DQBW-SLOW-NEXT:    subq $6216, %rsp # imm = 0x1848
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm18, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm19
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm5
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [11,3,11,3,11,3,11,3]
-; AVX512DQBW-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm27 = [2,10,0,3,2,10,0,3]
-; AVX512DQBW-SLOW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm16
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [2,10,0,3,2,10,0,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    movb $96, %r10b
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rax), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm17, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm27, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%r9), %ymm5
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%r9), %ymm3
@@ -18443,761 +18568,778 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
 ; AVX512DQBW-SLOW-NEXT:    movb $28, %r10b
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,12,7,0,1,12,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [4,12,0,5,4,12,0,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm21
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [5,0,14,6,5,0,14,6]
-; AVX512DQBW-SLOW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [0,13,6,7,0,13,6,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [15,7,15,7,15,7,15,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm5, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [6,13,14,7,6,13,14,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm17, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm27, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm7[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm9, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
+; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm5, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm17, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm27, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%r9), %ymm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%r8), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[2],ymm6[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm7[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm16, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm9, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm25 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm25 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm25, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm26 = [6,13,14,7,6,13,14,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm15, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm2[2,3,2,3],zmm9[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm12, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm17, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm25, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm31
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm22
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm16
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm5, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm17, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm27, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%r9), %ymm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%r9), %ymm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%r8), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm6[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm16, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm9, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%r8), %ymm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm6[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm12, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm11, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm21, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm17, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm5, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm8, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm29, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm27, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%r9), %ymm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%r8), %ymm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm18
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm7, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm10
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm24
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm27
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%r9), %ymm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%r8), %ymm2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm7[0],ymm2[2],ymm7[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm2[2,3,2,3],zmm1[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[2],ymm9[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm2[2,3,2,3],zmm27[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm12, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm15, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm16, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm17, %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm19, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm15, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm30
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm23, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm5, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm9, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm27, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm25, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm12
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm21
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm7, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm27
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm23
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%r9), %ymm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%r8), %ymm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm2[2,3,2,3],zmm23[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm15, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm30, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm23
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm28
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm20 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%r9), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%r8), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm4[2,3,2,3],zmm2[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm31, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm16, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r9), %ymm23
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%r8), %ymm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm6[0],ymm23[0],ymm6[2],ymm23[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 {%k2} = zmm6[2,3,2,3],zmm2[2,3,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm20
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm25, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm17, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm19, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm26, %zmm2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [13,5,13,5,13,5,13,5]
-; AVX512DQBW-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm23
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm13, %zmm23
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm2
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [6,14,6,14,6,14,6,14]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm0, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,0,12,4,3,0,12,4]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm4, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm5, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm9, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm5, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm1, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm29
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm26, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm9, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm13, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm26, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm5, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm13, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm0, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm9, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm0, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm26, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm5, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm26, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm9, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm10
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm26, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm5, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm27, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm20, %zmm3, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm9, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm28
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm20, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm5, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm29, %zmm6, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm29
 ; AVX512DQBW-SLOW-NEXT:    movb $48, %r10b
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k3
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [1,0,10,2,1,0,10,2]
 ; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm0[0],zmm4[0],zmm0[2],zmm4[2],zmm0[4],zmm4[4],zmm0[6],zmm4[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k3} = zmm19[0],zmm3[0],zmm19[2],zmm3[2],zmm19[4],zmm3[4],zmm19[6],zmm3[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,0,1,0,8,0,1]
 ; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [6,14,6,14]
+; AVX512DQBW-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [15,7,15,7]
+; AVX512DQBW-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k3} = zmm0[0],zmm3[0],zmm0[2],zmm3[2],zmm0[4],zmm3[4],zmm0[6],zmm3[6]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm8 = [15,7,15,7]
-; AVX512DQBW-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm15 {%k3} = zmm0[0],zmm4[0],zmm0[2],zmm4[2],zmm0[4],zmm4[4],zmm0[6],zmm4[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k3} = zmm22[0],zmm23[0],zmm22[2],zmm23[2],zmm22[4],zmm23[4],zmm22[6],zmm23[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm2, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm9, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm13, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm8, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm16
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm31, %zmm1, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm26 {%k3} = zmm28[0],zmm21[0],zmm28[2],zmm21[2],zmm28[4],zmm21[4],zmm28[6],zmm21[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm2, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm9, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm14 {%k3} = zmm31[0],zmm22[0],zmm31[2],zmm22[2],zmm31[4],zmm22[4],zmm31[6],zmm22[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm31
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm13, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm8, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm24 {%k3} = zmm11[0],zmm18[0],zmm11[2],zmm18[2],zmm11[4],zmm18[4],zmm11[6],zmm18[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm31
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm2, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm13, %zmm31
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm15, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm25[0],zmm17[0],zmm25[2],zmm17[2],zmm25[4],zmm17[4],zmm25[6],zmm17[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm13, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm7, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm30
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm30
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k3} = zmm12[0],zmm21[0],zmm12[2],zmm21[2],zmm12[4],zmm21[4],zmm12[6],zmm21[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm28
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm2, %zmm27
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm13, %zmm28
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm15, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm8, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k3} = zmm7[0],zmm10[0],zmm7[2],zmm10[2],zmm7[4],zmm10[4],zmm7[6],zmm10[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm2, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm9, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm13, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm7, %zmm26
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm24
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k3} = zmm9[0],zmm5[0],zmm9[2],zmm5[2],zmm9[4],zmm5[4],zmm9[6],zmm5[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm25
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm21
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm22
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm22
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm25
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm8, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm21
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm10
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm9, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm17, %zmm21, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k3} = zmm21[0],zmm17[0],zmm21[2],zmm17[2],zmm21[4],zmm17[4],zmm21[6],zmm17[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm2, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm8, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm6[0],zmm0[0],zmm6[2],zmm0[2],zmm6[4],zmm0[4],zmm6[6],zmm0[6]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm6, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm4[0,1,2,3],zmm19[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm18
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm12
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm1, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm12, %zmm18, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm12, %zmm18, %zmm13
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm12, %zmm18, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm18[0],zmm12[0],zmm18[2],zmm12[2],zmm18[4],zmm12[4],zmm18[6],zmm12[6]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm10[0],zmm0[0],zmm10[2],zmm0[2],zmm10[4],zmm0[4],zmm10[6],zmm0[6]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm10, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm10, %zmm0, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm10
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm23 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm8 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm8 = zmm3[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,11,u,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm3, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,11,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm15
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,1,11,u,4,5,6,7>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm7, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
 ; AVX512DQBW-SLOW-NEXT:    movb $4, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,10,u,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm12, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <12,u,u,3,4,5,6,13>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,12,u,3,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm3, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm19 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm10 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm23, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <12,u,u,3,4,5,6,13>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm4, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,12,u,3,4,5,6,7>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [0,8,0,8,0,8,0,8]
+; AVX512DQBW-SLOW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm5 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    movb $24, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm18 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    movb $6, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k3
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 456(%rcx), %ymm12
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = mem[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm1 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,1,2,9,u,u,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm6 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    movb $64, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,1,2,3,4,15,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm10
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm1, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm23
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [4,12,0,5,4,12,0,5]
+; AVX512DQBW-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [5,0,14,6,5,0,14,6]
+; AVX512DQBW-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    movb $12, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm2 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm2 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 448(%r8), %zmm2, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,1,2,3,4,8,u,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm2, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,2,3,4,8,u,7>
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,5,15,u>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,2,3,9,u,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm12, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm12, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <13,u,2,3,4,5,6,14>
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm0, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm8, %zmm12
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm10
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rax), %zmm2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,10,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm7, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,12,3,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%r9), %ymm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%r8), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm0[0],ymm7[0],ymm0[2],ymm7[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm7[2,3,2,3],zmm5[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm0
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,9,0,3,4,9,0,3]
+; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm9
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,8,7]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%r9), %ymm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%r8), %ymm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm8[0],ymm4[2],ymm8[2]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [14,1,2,3,4,5,6,15]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,9,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,13,2,3,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,1,12,7,0,1,12,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm5
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm20
+; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,13,6,7,0,13,6,7]
+; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,8,7]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [14,1,2,3,4,5,6,15]
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,6,7]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,13,2,3,4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm12, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm10
 ; AVX512DQBW-SLOW-NEXT:    movb $8, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm21 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    movb $-31, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm5 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm30 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm27 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm25 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm22 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm21 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm18 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    movb $112, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm19, %zmm7 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm1, %zmm2 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 128(%rax), %zmm10, %zmm4 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 192(%rax), %zmm1, %zmm5 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 256(%rax), %zmm1, %zmm30 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, (%rax), %zmm1, %zmm0 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 320(%rax), %zmm1, %zmm25 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 384(%rax), %zmm3, %zmm21 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 64(%rax), %zmm1, %zmm4 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 128(%rax), %zmm0, %zmm2 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 192(%rax), %zmm0, %zmm3 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 256(%rax), %zmm0, %zmm27 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 320(%rax), %zmm0, %zmm22 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x2 $3, 384(%rax), %zmm23, %zmm18 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 8(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm5 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 72(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 136(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm2 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 200(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm4 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 264(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm30 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 328(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm26 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpbroadcastq 392(%rcx), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm22 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm19 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    movb $56, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm22 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm30 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm19 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm9 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movb $120, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm15 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm15 = zmm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm19 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm27 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm16 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm12 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm10 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm6 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, %zmm31 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm23
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm28 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm20 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movb $-61, %cl
 ; AVX512DQBW-SLOW-NEXT:    kmovd %ecx, %k1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm14 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm14 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm16 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm11 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm11 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm11 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm3 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm11 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm4 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm4 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm5 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm5 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm5 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm7 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm7 = zmm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm7 = zmm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm8 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm8 = zmm13[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm8 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm8 # 64-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # zmm8 = zmm15[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQBW-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
@@ -19212,111 +19354,111 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm17 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm15 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQBW-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm28 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm25 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQBW-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm2 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQBW-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm2 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX512DQBW-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,3,3]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm0 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512DQBW-SLOW-NEXT:    # ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,3,3]
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm29 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, 3008(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, 2944(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, 2880(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, 2752(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, 2624(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, 2944(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, 2880(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm8, 2816(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, 2752(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, 2688(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, 2624(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 2560(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, 2496(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 2432(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, 2304(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, 2176(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, 2496(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, 2432(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm7, 2368(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, 2304(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, 2240(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 2112(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 2048(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 1984(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, 1792(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, 2048(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 1984(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm5, 1920(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, 1856(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, 1792(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1728(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, 1664(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 1600(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 1536(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 1600(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 1536(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm4, 1472(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 1216(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, 1152(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, 1088(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 1216(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, 1152(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, 1088(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm3, 1024(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 896(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, 768(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, 704(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 640(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 576(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 768(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 704(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, 640(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm2, 576(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 512(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, 320(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 256(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 320(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 256(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, 192(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm1, 128(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 3520(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 3520(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3456(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -19327,959 +19469,961 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3264(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3200(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, 3072(%rax)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, 3072(%rax)
 ; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3136(%rax)
-; AVX512DQBW-SLOW-NEXT:    addq $6472, %rsp # imm = 0x1948
+; AVX512DQBW-SLOW-NEXT:    addq $6216, %rsp # imm = 0x1848
 ; AVX512DQBW-SLOW-NEXT:    vzeroupper
 ; AVX512DQBW-SLOW-NEXT:    retq
 ;
 ; AVX512DQBW-FAST-LABEL: store_i64_stride7_vf64:
 ; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    subq $6568, %rsp # imm = 0x19A8
+; AVX512DQBW-FAST-NEXT:    subq $6120, %rsp # imm = 0x17E8
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm21
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm13
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm6
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [11,3,11,3,11,3,11,3]
-; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [2,10,0,3,2,10,0,3]
-; AVX512DQBW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, (%rsp) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm17
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [2,10,0,3,2,10,0,3]
+; AVX512DQBW-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    movb $96, %r10b
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm7
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rax), %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rax), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [9,1,9,1,9,1,9,1]
 ; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm16
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
-; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm23 = [4,9,0,3,4,9,0,3]
+; AVX512DQBW-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm23, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm12, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm27, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm14, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%r9), %ymm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %ymm16
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa (%r8), %ymm6
 ; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %ymm30
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %ymm28
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
 ; AVX512DQBW-FAST-NEXT:    movb $28, %r10b
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
-; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm20
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,1,12,7,0,1,12,7]
-; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm17
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [4,12,0,5,4,12,0,5]
+; AVX512DQBW-FAST-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm22, %zmm0
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,1,12,7,0,1,12,7]
+; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [5,0,14,6,5,0,14,6]
 ; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
 ; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [15,7,15,7,15,7,15,7]
-; AVX512DQBW-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [6,13,14,7,6,13,14,7]
-; AVX512DQBW-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm12, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm30[0],ymm9[0],ymm30[2],ymm9[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm4[2,3,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm16, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm13, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm17, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm3
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm18, %zmm2
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [6,13,14,7,6,13,14,7]
+; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm12, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rax), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%r9), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %ymm24
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm24[0],ymm0[0],ymm24[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm5[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm27, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm14, %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm28[0],ymm16[0],ymm28[2],ymm16[2]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm2[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm16, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm23, %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm17, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm22, %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm21
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm7, %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm8, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm30
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm27, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm12, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm16
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm14, %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rax), %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %ymm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%r8), %ymm11
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm11[0],ymm25[0],ymm11[2],ymm25[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm3[2,3,2,3],zmm23[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rax), %zmm25
+; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%r9), %ymm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%r8), %ymm12
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %ymm12, %ymm24
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm4[2,3,2,3],zmm25[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r8), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm13, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm9, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm19
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm25, %zmm23, %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm20, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm17, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm25, %zmm10, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm11, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm25, %zmm7, %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm18, %zmm4
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm8, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm31
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm31, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm13 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rax), %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%r9), %ymm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%r8), %ymm5
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm5[0],ymm9[0],ymm5[2],ymm9[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k2} = zmm4[2,3,2,3],zmm23[2,3,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r8), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r9), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm20, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm4
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm3, %zmm25
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm26
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm27, %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm31
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm9
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm14, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rax), %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %ymm20
+; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%r8), %ymm13
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm13[0],ymm20[0],ymm13[2],ymm20[2]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm4[2,3,2,3],zmm0[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r8), %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm25
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm19, %zmm5
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm18, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm23, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm22, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm11, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm19
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm14, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm22, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm18, %zmm4
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm17
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rax), %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%r9), %ymm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%r8), %ymm2
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm10[2,3,2,3],zmm21[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r8), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r9), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm17, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm27, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm29
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm11
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm3, %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rax), %zmm27
+; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%r9), %ymm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%r8), %ymm4
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k2} = zmm7[2,3,2,3],zmm27[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r8), %zmm7
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r9), %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm25, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm23, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm1, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm22, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm10, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm29, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm18, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm19, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm18, %zmm7
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm17, %zmm27
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm10
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm8, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm14, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm12
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm7
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm3, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rax), %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%r9), %ymm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%r8), %ymm2
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
+; AVX512DQBW-FAST-NEXT:    vmovdqa %ymm2, %ymm6
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm11[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r8), %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r9), %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm14, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm27
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm11
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm25, %zmm27
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm23, %zmm27
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm22, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm21, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm17, %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,3,7,7]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm30 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm30, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm28 # 32-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm24 # 32-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %ymm24, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm25, %ymm0, %ymm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm9, %ymm0, %ymm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm4, %ymm0, %ymm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm20, %ymm0, %ymm13
+; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm5, %ymm0, %ymm4
+; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm10, %ymm0, %ymm6
+; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%r9), %ymm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%r8), %ymm2
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm1, %ymm0, %ymm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [3,0,12,4,3,0,12,4]
-; AVX512DQBW-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm29, %zmm1
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
-; AVX512DQBW-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm21, %zmm2
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm24 = [3,0,12,4,3,0,12,4]
+; AVX512DQBW-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm24, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-FAST-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm23, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm3, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm24, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm23, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm24, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm4, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm0, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm8, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm31, %zmm24, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm14
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm23, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm4, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm0, %zmm31
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm24, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm23, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm4, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm0, %zmm29
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm24, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm23, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm6, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm13
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm10
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [2,10,0,3,2,10,0,3]
+; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm18
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm28
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm24, %zmm28
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm23, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm9
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm9, %zmm7
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm9, %zmm2, %zmm24
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm9, %zmm23
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm9, %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm0, %zmm9
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm13
 ; AVX512DQBW-FAST-NEXT:    movb $48, %r10b
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k3
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,8,0,1,0,8,0,1]
-; AVX512DQBW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [0,8,0,1,0,8,0,1]
+; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [1,0,10,2,1,0,10,2]
+; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [1,0,10,2,1,0,10,2]
-; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k3} = zmm8[0],zmm0[0],zmm8[2],zmm0[2],zmm8[4],zmm0[4],zmm8[6],zmm0[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,5,13,5,13,5,13,5]
+; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm0[0],zmm26[0],zmm0[2],zmm26[2],zmm0[4],zmm26[4],zmm0[6],zmm26[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm21, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm30, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm18 = [15,7,15,7]
-; AVX512DQBW-FAST-NEXT:    # ymm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm18, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [6,14,6,14]
+; AVX512DQBW-FAST-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [15,7,15,7]
+; AVX512DQBW-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm21, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm19
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k3} = zmm8[0],zmm0[0],zmm8[2],zmm0[2],zmm8[4],zmm0[4],zmm8[6],zmm0[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm8, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm4[0],zmm2[0],zmm4[2],zmm2[2],zmm4[4],zmm2[4],zmm4[6],zmm2[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm18, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm21, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm8, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm11, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm2[0],zmm19[0],zmm2[2],zmm19[2],zmm2[4],zmm19[4],zmm2[6],zmm19[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm6, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm20
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm11 {%k3} = zmm2[0],zmm30[0],zmm2[2],zmm30[2],zmm2[4],zmm30[4],zmm2[6],zmm30[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm7, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm30, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm22, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm18, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm21, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm3, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm8, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm11, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm2[0],zmm16[0],zmm2[2],zmm16[2],zmm2[4],zmm16[4],zmm2[6],zmm16[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm21, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm6, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm30
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm14 {%k3} = zmm2[0],zmm26[0],zmm2[2],zmm26[2],zmm2[4],zmm26[4],zmm2[6],zmm26[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm7, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm30, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm22, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm18, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm3, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm28, %zmm29, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm8, %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm29
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm27
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm17 {%k3} = zmm5[0],zmm0[0],zmm5[2],zmm0[2],zmm5[4],zmm0[4],zmm5[6],zmm0[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm31
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm31
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm26
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k3} = zmm0[0],zmm12[0],zmm0[2],zmm12[2],zmm0[4],zmm12[4],zmm0[6],zmm12[6]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm31, %zmm11, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm1 {%k3} = zmm0[0],zmm31[0],zmm0[2],zmm31[2],zmm0[4],zmm31[4],zmm0[6],zmm31[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm31, %zmm21, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm31, %zmm30, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm31, %zmm18, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm29, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm21, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm3, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm8, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm11, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm28
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm7, %zmm28
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k3} = zmm23[0],zmm20[0],zmm23[2],zmm20[2],zmm23[4],zmm20[4],zmm23[6],zmm20[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm22, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm3, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm30, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [11,3,11,3,11,3,11,3]
+; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm0, %zmm8
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm22, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm25
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm6, %zmm25
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm21
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm21
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm15, %zmm12, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm18, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm13
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm24 {%k3} = zmm12[0],zmm15[0],zmm12[2],zmm15[2],zmm12[4],zmm15[4],zmm12[6],zmm15[6]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm15, %zmm12, %zmm7
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm15, %zmm12, %zmm22
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm3, %zmm12
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm28 {%k3} = zmm11[0],zmm5[0],zmm11[2],zmm5[2],zmm11[4],zmm5[4],zmm11[6],zmm5[6]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm11, %zmm6
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm11, %zmm5, %zmm1
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm2[0,1,2,3],zmm18[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r8), %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,11,u,u,4,5,6,7>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm15, %zmm28, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r9), %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <0,1,11,u,4,5,6,7>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm18
+; AVX512DQBW-FAST-NEXT:    movb $4, %sil
+; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm10 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <0,1,2,10,u,5,6,7>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm16
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm0, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm29, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm8, %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm4, %zmm29
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <12,u,u,3,4,5,6,13>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm15, %zmm4, %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,12,u,3,4,5,6,7>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm14
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [0,8,0,8,0,8,0,8]
+; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm8
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm7, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm29 {%k3} = zmm6[0],zmm5[0],zmm6[2],zmm5[2],zmm6[4],zmm5[4],zmm6[6],zmm5[6]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm6, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k3} = zmm12[0],zmm4[0],zmm12[2],zmm4[2],zmm12[4],zmm4[4],zmm12[6],zmm4[6]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm12, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm12, %zmm4, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm18, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm13 {%k1}
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r8), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,11,u,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm9, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r9), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,1,11,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm4, %zmm14
-; AVX512DQBW-FAST-NEXT:    movb $4, %sil
-; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm13 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,1,2,10,u,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm13, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <12,u,u,3,4,5,6,13>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = <0,12,u,3,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm10, %zmm18
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm0 # 64-byte Folded Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm23 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm27
 ; AVX512DQBW-FAST-NEXT:    movb $24, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm12 {%k4}
 ; AVX512DQBW-FAST-NEXT:    movb $6, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k5
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 456(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k5}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,2,9,u,u,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm7, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm12 {%k4}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 456(%rcx), %ymm9
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k5}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,2,9,u,u,6,7>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm15, %zmm1, %zmm9
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm11 {%k4}
 ; AVX512DQBW-FAST-NEXT:    movb $64, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,1,2,3,4,15,u,u>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r8), %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r9), %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm5, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm5, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm5, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm5 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,1,2,3,4,15,u,u>
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm0, %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r8), %zmm10
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm0, %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r9), %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm10, %zmm4
+; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [9,1,9,1,9,1,9,1]
+; AVX512DQBW-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm10, %zmm15
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,12,0,5,4,12,0,5]
+; AVX512DQBW-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm10, %zmm8
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [5,0,14,6,5,0,14,6]
+; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm10, %zmm0, %zmm2
 ; AVX512DQBW-FAST-NEXT:    movb $12, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 448(%rdx), %xmm5
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm0, %zmm11 {%k3}
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm11, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,1,2,3,4,8,u,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm5, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <u,1,2,3,4,5,15,u>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,1,2,3,9,u,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm1, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <13,u,2,3,4,5,6,14>
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm3, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm5, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm9 {%k5}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm31 {%k5}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm8 {%k5}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm3 {%k5}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 264(%rcx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm20
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm20 {%k5}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 328(%rcx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm28 {%k5}
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq 392(%rcx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = mem[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm24 {%k5}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rax), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,10,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,12,3,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm18, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rax), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm16 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k2} = zmm17[2,3,2,3],zmm1[2,3,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm16, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa 448(%rdx), %xmm10
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm6 {%k3}
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm6, %zmm1
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,1,2,3,4,8,u,7>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm10
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,1,2,3,4,5,15,u>
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = <0,1,2,3,9,u,6,7>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm13
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <13,u,2,3,4,5,6,14>
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm9
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm11
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 8(%rcx), %ymm0
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm6 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm0
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm17
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm17 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 136(%rcx), %ymm0
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 200(%rcx), %ymm0
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 264(%rcx), %ymm0
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm27 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 328(%rcx), %ymm0
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm26 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpbroadcastq 392(%rcx), %ymm0
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm21 {%k5}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rax), %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,10,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm16, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,12,3,4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm14, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rax), %zmm1
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,9,0,3,4,9,0,3]
+; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[2,3,2,3],zmm1[2,3,2,3]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,1,12,7,0,1,12,7]
+; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm8
+; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [0,13,6,7,0,13,6,7]
+; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,5,8,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm7, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm10, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [14,1,2,3,4,5,6,15]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm6
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [14,1,2,3,4,5,6,15]
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm12
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,13,2,3,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm2, %zmm0
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm13, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm12
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,13,2,3,4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm9, %zmm28
+; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm5, %zmm11
 ; AVX512DQBW-FAST-NEXT:    movb $8, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm18 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k4}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm26 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k4}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm9 {%k4}
 ; AVX512DQBW-FAST-NEXT:    movb $-31, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm2 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm13 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdx), %xmm3
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm1 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm1 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdx), %xmm3
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm5 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm11 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm25 {%k3}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm2 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdx), %xmm3
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm5 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm5 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdx), %xmm3
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm29 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%rdx), %xmm3
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm31 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%rdx), %xmm3
+; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm25 {%k3}
 ; AVX512DQBW-FAST-NEXT:    movb $112, %cl
 ; AVX512DQBW-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm0, %zmm13 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm0, %zmm1 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, (%rax), %zmm3, %zmm0 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 64(%rax), %zmm3, %zmm1 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 128(%rax), %zmm0, %zmm2 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 128(%rax), %zmm3, %zmm2 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 192(%rax), %zmm3, %zmm5 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 192(%rax), %zmm0, %zmm5 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 256(%rax), %zmm23, %zmm7 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 320(%rax), %zmm27, %zmm11 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm27
-; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 384(%rax), %zmm10, %zmm25 {%k2}
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 256(%rax), %zmm0, %zmm29 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 320(%rax), %zmm0, %zmm31 {%k2}
+; AVX512DQBW-FAST-NEXT:    vinserti64x2 $3, 384(%rax), %zmm4, %zmm25 {%k2}
 ; AVX512DQBW-FAST-NEXT:    movb $56, %cl
 ; AVX512DQBW-FAST-NEXT:    kmovd %ecx, %k2
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm20 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm30 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm26
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm24 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm21 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $14, %cl
 ; AVX512DQBW-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm23 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm20 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm22 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm19 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm20 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm18 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm16 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm15 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm14 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm6 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm4 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm2 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm29 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],mem[6,7]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm24 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm21 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm7 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movb $120, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm15 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm13 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm3 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm16 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm18 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm13
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movb $-61, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm17 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm5 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm7 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm7 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm7 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm8 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm9 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm9 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm10 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10 {%k1}
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm11 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm11 = zmm30[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm4 = zmm4[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm5 = zmm5[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm5 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm8 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm9 = zmm9[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k1}
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm10 # 64-byte Folded Reload
+; AVX512DQBW-FAST-NEXT:    # zmm10 = zmm22[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm10 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 3008(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, 2944(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, 2880(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm2, 2816(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, 2752(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, 3008(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, 2944(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, 2880(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm10, 2816(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, 2752(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, 2688(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm2, 2624(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, 2560(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm10, 2624(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, 2560(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, 2496(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 2432(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm2, 2368(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, 2304(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, 2240(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, 2432(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm9, 2368(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, 2304(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, 2240(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 2176(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, 2112(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, 2112(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 2048(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, 1984(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm2, 1920(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, 1856(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 1984(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm8, 1920(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, 1856(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, 1792(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1728(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, 1664(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, 1600(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, 1536(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm2, 1472(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 1664(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, 1600(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, 1536(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm7, 1472(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, 1408(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 1216(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, 1152(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, 1088(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm2, 1024(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 1216(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, 1152(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, 1088(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm5, 1024(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 896(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, 704(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, 640(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm2, 576(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, 512(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 768(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, 640(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm4, 576(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 512(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, 320(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, 256(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, 192(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, 320(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, 256(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, 192(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-FAST-NEXT:    vmovaps %zmm3, 128(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, 3520(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3456(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 3520(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, 3456(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3392(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -20288,10 +20432,10 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3264(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3200(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 3072(%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, 3072(%rax)
 ; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3136(%rax)
-; AVX512DQBW-FAST-NEXT:    addq $6568, %rsp # imm = 0x19A8
+; AVX512DQBW-FAST-NEXT:    addq $6120, %rsp # imm = 0x17E8
 ; AVX512DQBW-FAST-NEXT:    vzeroupper
 ; AVX512DQBW-FAST-NEXT:    retq
   %in.vec0 = load <64 x i64>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll
index 8bad8e79ae361..76d060fe422b3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll
@@ -329,29 +329,30 @@ define void @store_i64_stride8_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
 ; AVX512-NEXT:    vinserti64x4 $1, (%r9), %zmm2, %zmm2
 ; AVX512-NEXT:    vinserti64x4 $1, (%r10), %zmm3, %zmm3
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,4,8,12,0,4,8,12]
-; AVX512-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm4, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,4,8,12]
 ; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,5,9,13,1,5,9,13]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,4,8,12,0,4,8,12]
 ; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm5, %zmm6
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm5
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,5,9,13]
 ; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,6,10,14,2,6,10,14]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,5,9,13,1,5,9,13]
 ; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm6, %zmm7
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm6
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,6,10,14]
 ; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm7[4,5,6,7]
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [3,7,11,15,3,7,11,15]
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [2,6,10,14,2,6,10,14]
 ; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512-NEXT:    vpermt2q %zmm3, %zmm7, %zmm2
-; AVX512-NEXT:    vpermt2q %zmm1, %zmm7, %zmm0
-; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm7[4,5,6,7]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm7 = [3,7,11,15]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [3,7,11,15,3,7,11,15]
+; AVX512-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm0
+; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm7[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512-NEXT:    vmovdqa64 %zmm0, 192(%rax)
 ; AVX512-NEXT:    vmovdqa64 %zmm6, 128(%rax)
 ; AVX512-NEXT:    vmovdqa64 %zmm5, 64(%rax)
@@ -784,112 +785,117 @@ define void @store_i64_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r11
 ; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm6
-; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm9
-; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm7
-; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm8
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm8
+; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm9
 ; AVX512F-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512F-NEXT:    vmovdqa64 (%r9), %zmm2
-; AVX512F-NEXT:    vmovdqa64 (%r11), %zmm1
+; AVX512F-NEXT:    vmovdqa64 (%r9), %zmm1
+; AVX512F-NEXT:    vmovdqa64 (%r11), %zmm2
 ; AVX512F-NEXT:    vmovdqa64 (%r10), %zmm3
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,1,9,1,9,1,9]
 ; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm4, %zmm5
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm0, %zmm4
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
 ; AVX512F-NEXT:    movb $-64, %r8b
 ; AVX512F-NEXT:    kmovw %r8d, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512F-NEXT:    vinserti128 $1, (%rcx), %ymm5, %ymm10
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512F-NEXT:    vinserti128 $1, (%rdx), %ymm5, %ymm12
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm12[1],ymm10[1],ymm12[3],ymm10[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm4, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX512F-NEXT:    vinserti128 $1, (%rcx), %ymm4, %ymm10
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512F-NEXT:    vinserti128 $1, (%rdx), %ymm4, %ymm11
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],ymm10[1],ymm11[3],ymm10[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm5, %zmm16
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [6,14,6,14,6,14,6,14]
 ; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm5, %zmm11
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm11 {%k1} = zmm1[0],zmm3[0],zmm1[2],zmm3[2],zmm1[4],zmm3[4],zmm1[6],zmm3[6]
-; AVX512F-NEXT:    vpermi2q %zmm9, %zmm6, %zmm5
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [6,14,6,14]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm2[0],zmm3[0],zmm2[2],zmm3[2],zmm2[4],zmm3[4],zmm2[6],zmm3[6]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,6,14]
+; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2q %zmm9, %zmm8, %zmm12
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = [6,14]
+; AVX512F-NEXT:    vpermi2q %zmm7, %zmm6, %zmm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm5
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm2[1],zmm3[1],zmm2[3],zmm3[3],zmm2[5],zmm3[5],zmm2[7],zmm3[7]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [7,15,7,15]
 ; AVX512F-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm8, %zmm7, %zmm13
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm11, %zmm5
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [7,15,7,15,7,15,7,15]
-; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm11, %zmm13
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm13 {%k1} = zmm1[1],zmm3[1],zmm1[3],zmm3[3],zmm1[5],zmm3[5],zmm1[7],zmm3[7]
-; AVX512F-NEXT:    vpermi2q %zmm9, %zmm6, %zmm11
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [7,15,7,15]
-; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
+; AVX512F-NEXT:    vpermi2q %zmm9, %zmm8, %zmm13
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm14 = [7,15]
+; AVX512F-NEXT:    vpermi2q %zmm7, %zmm6, %zmm14
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm12, %zmm12
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [4,12,4,12,4,12,4,12]
 ; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm13, %zmm14
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm15 = zmm0[0],zmm2[0],zmm0[2],zmm2[2],zmm0[4],zmm2[4],zmm0[6],zmm2[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm9, %zmm6, %zmm13
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [4,12,4,12]
-; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm2, %zmm13
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm14 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [4,12,4,12]
+; AVX512F-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2q %zmm9, %zmm8, %zmm13
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,12]
+; AVX512F-NEXT:    vpermi2q %zmm7, %zmm6, %zmm15
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm14, %zmm13
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [5,13,5,13,5,13,5,13]
 ; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm14, %zmm15
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm16 = zmm0[1],zmm2[1],zmm0[3],zmm2[3],zmm0[5],zmm2[5],zmm0[7],zmm2[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k1}
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm14, %zmm6
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,5,13]
-; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermi2q %zmm8, %zmm7, %zmm9
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm16, %zmm6
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm2, %zmm14
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm15 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermi2q %zmm9, %zmm8, %zmm14
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = [5,13]
+; AVX512F-NEXT:    vpermi2q %zmm7, %zmm6, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm15, %zmm6
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [0,8,0,8,0,8,0,8]
 ; AVX512F-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm7, %zmm8
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm0, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm12[0],ymm10[0],ymm12[2],ymm10[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm8, %zmm7
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [2,10,2,10,2,10,2,10]
 ; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm8, %zmm9
-; AVX512F-NEXT:    vpermi2q %zmm2, %zmm0, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
-; AVX512F-NEXT:    vmovdqa (%rcx), %ymm9
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqa (%rcx), %ymm8
 ; AVX512F-NEXT:    vmovdqa (%rdx), %ymm10
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
 ; AVX512F-NEXT:    vmovdqa (%rsi), %ymm14
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm15
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm12[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm8, %zmm4
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [3,11,3,11,3,11,3,11]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm8, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm9[1],ymm10[3],ymm9[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm11[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm9, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm10[1],ymm8[1],ymm10[3],ymm8[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
 ; AVX512F-NEXT:    vmovdqa64 %zmm0, 192(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, 128(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, (%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm6, 320(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm13, 256(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 448(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, 448(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm5, 384(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, 64(%rax)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -899,112 +905,117 @@ define void @store_i64_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm9
 ; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 (%r11), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 (%r11), %zmm2
 ; AVX512BW-NEXT:    vmovdqa64 (%r10), %zmm3
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,1,9,1,9,1,9]
 ; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm4, %zmm5
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm0, %zmm4
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
 ; AVX512BW-NEXT:    movb $-64, %r8b
 ; AVX512BW-NEXT:    kmovd %r8d, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512BW-NEXT:    vinserti128 $1, (%rcx), %ymm5, %ymm10
-; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512BW-NEXT:    vinserti128 $1, (%rdx), %ymm5, %ymm12
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm12[1],ymm10[1],ymm12[3],ymm10[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm4, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX512BW-NEXT:    vinserti128 $1, (%rcx), %ymm4, %ymm10
+; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512BW-NEXT:    vinserti128 $1, (%rdx), %ymm4, %ymm11
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],ymm10[1],ymm11[3],ymm10[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm5, %zmm16
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [6,14,6,14,6,14,6,14]
 ; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm11
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm11 {%k1} = zmm1[0],zmm3[0],zmm1[2],zmm3[2],zmm1[4],zmm3[4],zmm1[6],zmm3[6]
-; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm6, %zmm5
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [6,14,6,14]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm2[0],zmm3[0],zmm2[2],zmm3[2],zmm2[4],zmm3[4],zmm2[6],zmm3[6]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,6,14]
+; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm8, %zmm12
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [6,14]
+; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm2[1],zmm3[1],zmm2[3],zmm3[3],zmm2[5],zmm3[5],zmm2[7],zmm3[7]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [7,15,7,15]
 ; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm13
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm11, %zmm5
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [7,15,7,15,7,15,7,15]
-; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm13
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm13 {%k1} = zmm1[1],zmm3[1],zmm1[3],zmm3[3],zmm1[5],zmm3[5],zmm1[7],zmm3[7]
-; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm6, %zmm11
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [7,15,7,15]
-; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
+; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm8, %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = [7,15]
+; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm14
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm12, %zmm12
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [4,12,4,12,4,12,4,12]
 ; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm14
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm15 = zmm0[0],zmm2[0],zmm0[2],zmm2[2],zmm0[4],zmm2[4],zmm0[6],zmm2[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm6, %zmm13
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [4,12,4,12]
-; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm14
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm13
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm14 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm14 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [4,12,4,12]
+; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm8, %zmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,12]
+; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm15
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm14, %zmm13
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [5,13,5,13,5,13,5,13]
 ; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm15
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm16 = zmm0[1],zmm2[1],zmm0[3],zmm2[3],zmm0[5],zmm2[5],zmm0[7],zmm2[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm16 {%k1}
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm6
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,5,13]
-; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm7, %zmm9
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm16, %zmm6
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm14
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm15 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm15 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermi2q %zmm9, %zmm8, %zmm14
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [5,13]
+; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm6, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm15, %zmm6
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [0,8,0,8,0,8,0,8]
 ; AVX512BW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm8
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm0, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm12[0],ymm10[0],ymm12[2],ymm10[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm7, %zmm7
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm8, %zmm7
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [2,10,2,10,2,10,2,10]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm9
-; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm0, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k1}
-; AVX512BW-NEXT:    vmovdqa (%rcx), %ymm9
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqa (%rcx), %ymm8
 ; AVX512BW-NEXT:    vmovdqa (%rdx), %ymm10
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
 ; AVX512BW-NEXT:    vmovdqa (%rsi), %ymm14
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm15
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm12[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm8, %zmm4
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [3,11,3,11,3,11,3,11]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm9[1],ymm10[3],ymm9[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm11[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm9, %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm10[1],ymm8[1],ymm10[3],ymm8[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, 192(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm4, 128(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm6, 320(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm13, 256(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 448(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, 448(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm5, 384(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, 64(%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <8 x i64>, ptr %in.vecptr0, align 64
@@ -1890,415 +1901,437 @@ define void @store_i64_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-LABEL: store_i64_stride8_vf16:
 ; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    subq $72, %rsp
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm11
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm25
-; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 64(%rdx), %zmm12
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm8
 ; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm0
-; AVX512F-NEXT:    vmovdqa64 64(%rcx), %zmm26
-; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm17
-; AVX512F-NEXT:    vmovdqa64 (%r8), %zmm6
+; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm23
+; AVX512F-NEXT:    vmovdqa64 (%r8), %zmm29
 ; AVX512F-NEXT:    vmovdqa64 64(%r8), %zmm16
-; AVX512F-NEXT:    vmovdqa64 (%r9), %zmm31
-; AVX512F-NEXT:    vmovdqa64 64(%r9), %zmm27
-; AVX512F-NEXT:    vmovdqa64 (%r11), %zmm8
-; AVX512F-NEXT:    vmovdqa64 64(%r11), %zmm30
-; AVX512F-NEXT:    vmovdqa64 (%r10), %zmm9
-; AVX512F-NEXT:    vmovdqa64 64(%r10), %zmm29
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [1,9,1,9,1,9,1,9]
-; AVX512F-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm18, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm18, %zmm4
-; AVX512F-NEXT:    movb $-64, %r8b
-; AVX512F-NEXT:    kmovw %r8d, %k1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 (%r9), %zmm24
+; AVX512F-NEXT:    vmovdqa64 64(%r9), %zmm30
+; AVX512F-NEXT:    vmovdqa64 (%r10), %zmm7
+; AVX512F-NEXT:    vmovdqa64 64(%r10), %zmm6
+; AVX512F-NEXT:    vmovdqa64 (%rax), %zmm10
+; AVX512F-NEXT:    vmovdqa64 64(%rax), %zmm19
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm17, %zmm1
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
+; AVX512F-NEXT:    movb $-64, %al
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
 ; AVX512F-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512F-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm10
-; AVX512F-NEXT:    vinserti128 $1, (%rdx), %ymm10, %ymm10
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm10[1],ymm1[1],ymm10[3],ymm1[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm4, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [0,8,0,8,0,8,0,8]
-; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm23, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm23, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm13 {%k1}
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm10[0],ymm1[0],ymm10[2],ymm1[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm13, %zmm21
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [5,13,5,13,5,13,5,13]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm10, %zmm1
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm6[1],zmm31[1],zmm6[3],zmm31[3],zmm6[5],zmm31[5],zmm6[7],zmm31[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm10, %zmm1
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,5,13]
-; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm14, %zmm13
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm22
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [4,12,4,12,4,12,4,12]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm15, %zmm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm6[0],zmm31[0],zmm6[2],zmm31[2],zmm6[4],zmm31[4],zmm6[6],zmm31[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm15, %zmm13
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,12,4,12]
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX512F-NEXT:    vinserti128 $1, (%rdx), %ymm5, %ymm5
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm5[1],ymm1[1],ymm5[3],ymm1[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm3, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm20, %zmm3
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm18, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm21, %zmm1
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm29[1],zmm24[1],zmm29[3],zmm24[3],zmm29[5],zmm24[5],zmm29[7],zmm24[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13]
 ; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm1, %zmm5
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm4, %zmm24
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [7,15,7,15,7,15,7,15]
-; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm13, %zmm5
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm8[1],zmm9[1],zmm8[3],zmm9[3],zmm8[5],zmm9[5],zmm8[7],zmm9[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm13, %zmm2
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [7,15,7,15]
-; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm4, %zmm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm5, %zmm20
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [6,14,6,14,6,14,6,14]
-; AVX512F-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm28, %zmm2
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k1} = zmm8[0],zmm9[0],zmm8[2],zmm9[2],zmm8[4],zmm9[4],zmm8[6],zmm9[6]
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm28, %zmm3
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
-; AVX512F-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm7, %zmm0
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm18, %zmm0
-; AVX512F-NEXT:    vpermi2q %zmm27, %zmm16, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm1, %zmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = [5,13]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm5, %zmm11
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm3, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [4,12,4,12,4,12,4,12]
+; AVX512F-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm26, %zmm3
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm29[0],zmm24[0],zmm29[2],zmm24[2],zmm29[4],zmm24[4],zmm29[6],zmm24[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm12 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
+; AVX512F-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm9, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm11 = [4,12]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm11, %zmm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm12, %zmm28
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm22, %zmm13
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm13 {%k1} = zmm7[1],zmm10[1],zmm7[3],zmm10[3],zmm7[5],zmm10[5],zmm7[7],zmm10[7]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [7,15,7,15]
+; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm12, %zmm15
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,15]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm3, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm25
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [6,14,6,14,6,14,6,14]
+; AVX512F-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} ymm31 = [6,14,6,14]
+; AVX512F-NEXT:    # ymm31 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm31, %zmm0
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm23 = [6,14]
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm23, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm27, %zmm8
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm7[0],zmm10[0],zmm7[2],zmm10[2],zmm7[4],zmm10[4],zmm7[6],zmm10[6]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm8, %zmm13
+; AVX512F-NEXT:    vpermi2q %zmm19, %zmm6, %zmm17
+; AVX512F-NEXT:    vpermi2q %zmm30, %zmm16, %zmm14
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm14 {%k1}
 ; AVX512F-NEXT:    vmovdqa 64(%rsi), %xmm0
 ; AVX512F-NEXT:    vinserti128 $1, 64(%rcx), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX512F-NEXT:    vinserti128 $1, 64(%rdx), %ymm2, %ymm2
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm2[1],ymm0[1],ymm2[3],ymm0[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm18, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm23, %zmm3
-; AVX512F-NEXT:    vpermi2q %zmm27, %zmm16, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k1}
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[2],ymm0[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm23, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm16[1],zmm27[1],zmm16[3],zmm27[3],zmm16[5],zmm27[5],zmm16[7],zmm27[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm25, %zmm11, %zmm10
-; AVX512F-NEXT:    vpermi2q %zmm26, %zmm12, %zmm14
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm15, %zmm0
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm16[0],zmm27[0],zmm16[2],zmm27[2],zmm16[4],zmm27[4],zmm16[6],zmm27[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
-; AVX512F-NEXT:    vpermi2q %zmm25, %zmm11, %zmm15
-; AVX512F-NEXT:    vpermi2q %zmm26, %zmm12, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm13, %zmm0
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm30[1],zmm29[1],zmm30[3],zmm29[3],zmm30[5],zmm29[5],zmm30[7],zmm29[7]
-; AVX512F-NEXT:    vpermi2q %zmm25, %zmm11, %zmm13
-; AVX512F-NEXT:    vpermi2q %zmm26, %zmm12, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm28, %zmm0
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6]
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm28, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm7, %zmm12
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [3,11,3,11,3,11,3,11]
-; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm3, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm3, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm10 {%k1}
-; AVX512F-NEXT:    vmovdqa (%rcx), %ymm7
-; AVX512F-NEXT:    vmovdqa 64(%rcx), %ymm11
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm4[1],ymm0[1],ymm4[3],ymm0[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm14, %zmm14
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm15
+; AVX512F-NEXT:    vpermi2q %zmm19, %zmm6, %zmm20
+; AVX512F-NEXT:    vpermi2q %zmm30, %zmm16, %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm18 {%k1}
+; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm8
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[2],ymm0[2]
+; AVX512F-NEXT:    vmovdqa64 64(%rdx), %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm18, %zmm18
+; AVX512F-NEXT:    vpermi2q %zmm19, %zmm6, %zmm21
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm16[1],zmm30[1],zmm16[3],zmm30[3],zmm16[5],zmm30[5],zmm16[7],zmm30[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 64(%rcx), %zmm20
+; AVX512F-NEXT:    vpermi2q %zmm20, %zmm0, %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm8, %zmm15, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm5
+; AVX512F-NEXT:    vpermi2q %zmm19, %zmm6, %zmm26
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm1 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm1 {%k1}
+; AVX512F-NEXT:    vpermi2q %zmm20, %zmm0, %zmm9
+; AVX512F-NEXT:    vpermi2q %zmm8, %zmm15, %zmm11
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm1, %zmm17
+; AVX512F-NEXT:    vpermi2q %zmm20, %zmm0, %zmm12
+; AVX512F-NEXT:    vpermi2q %zmm8, %zmm15, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vpermi2q %zmm30, %zmm16, %zmm22
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm22 {%k1} = zmm6[1],zmm19[1],zmm6[3],zmm19[3],zmm6[5],zmm19[5],zmm6[7],zmm19[7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm22, %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm30, %zmm16, %zmm27
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k1} = zmm6[0],zmm19[0],zmm6[2],zmm19[2],zmm6[4],zmm19[4],zmm6[6],zmm19[6]
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm31, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm23, %zmm15
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm27, %zmm3
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm4, %zmm0
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm8, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqa (%rcx), %ymm11
 ; AVX512F-NEXT:    vmovdqa (%rdx), %ymm12
-; AVX512F-NEXT:    vmovdqa 64(%rdx), %ymm13
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm14 = ymm12[1],ymm7[1],ymm12[3],ymm7[3]
-; AVX512F-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512F-NEXT:    vmovdqa64 64(%rsi), %ymm23
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm25
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm26
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm25[1],ymm15[1],ymm25[3],ymm15[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm14[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm5
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [2,10,2,10,2,10,2,10]
-; AVX512F-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm10, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm31, %zmm10, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm6 {%k1}
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm12[0],ymm7[0],ymm12[2],ymm7[2]
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm25[0],ymm15[0],ymm25[2],ymm15[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm6, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm3, %zmm7
-; AVX512F-NEXT:    vpermi2q %zmm27, %zmm16, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm3 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm13[1],ymm11[1],ymm13[3],ymm11[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm26[1],ymm23[1],ymm26[3],ymm23[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm3, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm10, %zmm30
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm10, %zmm16
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm16 {%k1}
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm13[0],ymm11[0],ymm13[2],ymm11[2]
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm26[0],ymm23[0],ymm26[2],ymm23[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm16, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 640(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, 704(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 128(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm0, 896(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 960(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 768(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 832(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm19, 512(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 576(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, 384(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, 448(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm24, 256(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, 320(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm21, (%rax)
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm11[1],ymm12[3],ymm11[3]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm15, %zmm7
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm10
+; AVX512F-NEXT:    vpermi2q %zmm19, %zmm6, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm15, %zmm6
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm15
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm15[1],ymm10[1],ymm15[3],ymm10[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm0
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm24, %zmm2, %zmm29
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm29 {%k1}
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm15[0],ymm10[0],ymm15[2],ymm10[2]
+; AVX512F-NEXT:    vmovdqa 64(%rcx), %ymm10
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm9[2,3],ymm7[2,3]
+; AVX512F-NEXT:    vmovdqa 64(%rdx), %ymm9
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm29, %zmm7
+; AVX512F-NEXT:    vmovdqa 64(%rsi), %ymm11
+; AVX512F-NEXT:    vpermi2q %zmm30, %zmm16, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm4
+; AVX512F-NEXT:    vpermt2q %zmm30, %zmm2, %zmm16
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm9[1],ymm10[1],ymm9[3],ymm10[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm4[1],ymm11[1],ymm4[3],ymm11[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm8, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm16 {%k1}
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm9[0],ymm10[0],ymm9[2],ymm10[2]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm11[0],ymm4[2],ymm11[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm6[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm16, %zmm4
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 640(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 704(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 128(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 896(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 960(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, 768(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 832(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 512(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm14, 576(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, 384(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm25, 448(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm28, 256(%rax)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, 320(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512F-NEXT:    addq $72, %rsp
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_i64_stride8_vf16:
 ; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    subq $72, %rsp
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm11
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm25
-; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm8
 ; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm29
 ; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm16
-; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm31
-; AVX512BW-NEXT:    vmovdqa64 64(%r9), %zmm27
-; AVX512BW-NEXT:    vmovdqa64 (%r11), %zmm8
-; AVX512BW-NEXT:    vmovdqa64 64(%r11), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 (%r10), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 64(%r10), %zmm29
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [1,9,1,9,1,9,1,9]
-; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm18, %zmm4
-; AVX512BW-NEXT:    movb $-64, %r8b
-; AVX512BW-NEXT:    kmovd %r8d, %k1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm24
+; AVX512BW-NEXT:    vmovdqa64 64(%r9), %zmm30
+; AVX512BW-NEXT:    vmovdqa64 (%r10), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 64(%r10), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 (%rax), %zmm10
+; AVX512BW-NEXT:    vmovdqa64 64(%rax), %zmm19
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm17 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm17, %zmm1
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
+; AVX512BW-NEXT:    movb $-64, %al
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
 ; AVX512BW-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512BW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm10
-; AVX512BW-NEXT:    vinserti128 $1, (%rdx), %ymm10, %ymm10
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm10[1],ymm1[1],ymm10[3],ymm1[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm4, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [0,8,0,8,0,8,0,8]
-; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm23, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm23, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm13 {%k1}
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm10[0],ymm1[0],ymm10[2],ymm1[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm13, %zmm21
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [5,13,5,13,5,13,5,13]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm1
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm6[1],zmm31[1],zmm6[3],zmm31[3],zmm6[5],zmm31[5],zmm6[7],zmm31[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm10, %zmm1
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,5,13]
-; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm14, %zmm13
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm22
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [4,12,4,12,4,12,4,12]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm15, %zmm1
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm6[0],zmm31[0],zmm6[2],zmm31[2],zmm6[4],zmm31[4],zmm6[6],zmm31[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm15, %zmm13
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,12,4,12]
+; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX512BW-NEXT:    vinserti128 $1, (%rdx), %ymm5, %ymm5
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm5[1],ymm1[1],ymm5[3],ymm1[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm3
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [5,13,5,13,5,13,5,13]
+; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm21, %zmm1
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm29[1],zmm24[1],zmm29[3],zmm24[3],zmm29[5],zmm24[5],zmm29[7],zmm24[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm3 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13]
 ; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm1, %zmm5
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm4, %zmm24
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [7,15,7,15,7,15,7,15]
-; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm13, %zmm5
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm8[1],zmm9[1],zmm8[3],zmm9[3],zmm8[5],zmm9[5],zmm8[7],zmm9[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm13, %zmm2
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [7,15,7,15]
-; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm4, %zmm7
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm5, %zmm20
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [6,14,6,14,6,14,6,14]
-; AVX512BW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm28, %zmm2
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k1} = zmm8[0],zmm9[0],zmm8[2],zmm9[2],zmm8[4],zmm9[4],zmm8[6],zmm9[6]
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm3
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [6,14,6,14]
-; AVX512BW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm7, %zmm0
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm18, %zmm0
-; AVX512BW-NEXT:    vpermi2q %zmm27, %zmm16, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm1, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = [5,13]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm5, %zmm11
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm26 = [4,12,4,12,4,12,4,12]
+; AVX512BW-NEXT:    # zmm26 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm26, %zmm3
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm29[0],zmm24[0],zmm29[2],zmm24[2],zmm29[4],zmm24[4],zmm29[6],zmm24[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm12 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
+; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm9, %zmm3
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [4,12]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm12, %zmm28
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm22, %zmm13
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm13 {%k1} = zmm7[1],zmm10[1],zmm7[3],zmm10[3],zmm7[5],zmm10[5],zmm7[7],zmm10[7]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [7,15,7,15]
+; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm12, %zmm15
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,15]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm3, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm25
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [6,14,6,14,6,14,6,14]
+; AVX512BW-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm31 = [6,14,6,14]
+; AVX512BW-NEXT:    # ymm31 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm31, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm23 = [6,14]
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm23, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm27, %zmm8
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm7[0],zmm10[0],zmm7[2],zmm10[2],zmm7[4],zmm10[4],zmm7[6],zmm10[6]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm8, %zmm13
+; AVX512BW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm17
+; AVX512BW-NEXT:    vpermi2q %zmm30, %zmm16, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm14 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 64(%rsi), %xmm0
 ; AVX512BW-NEXT:    vinserti128 $1, 64(%rcx), %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX512BW-NEXT:    vinserti128 $1, 64(%rdx), %ymm2, %ymm2
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm2[1],ymm0[1],ymm2[3],ymm0[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm18, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm23, %zmm3
-; AVX512BW-NEXT:    vpermi2q %zmm27, %zmm16, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm23 {%k1}
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[2],ymm0[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm23, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm16[1],zmm27[1],zmm16[3],zmm27[3],zmm16[5],zmm27[5],zmm16[7],zmm27[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm25, %zmm11, %zmm10
-; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm12, %zmm14
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm15, %zmm0
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm16[0],zmm27[0],zmm16[2],zmm27[2],zmm16[4],zmm27[4],zmm16[6],zmm27[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
-; AVX512BW-NEXT:    vpermi2q %zmm25, %zmm11, %zmm15
-; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm12, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm0
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm30[1],zmm29[1],zmm30[3],zmm29[3],zmm30[5],zmm29[5],zmm30[7],zmm29[7]
-; AVX512BW-NEXT:    vpermi2q %zmm25, %zmm11, %zmm13
-; AVX512BW-NEXT:    vpermi2q %zmm26, %zmm12, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm28, %zmm0
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm30[0],zmm29[0],zmm30[2],zmm29[2],zmm30[4],zmm29[4],zmm30[6],zmm29[6]
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm28, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm7, %zmm12
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [3,11,3,11,3,11,3,11]
-; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm3, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm3, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm10 {%k1}
-; AVX512BW-NEXT:    vmovdqa (%rcx), %ymm7
-; AVX512BW-NEXT:    vmovdqa 64(%rcx), %ymm11
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512BW-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm4[1],ymm0[1],ymm4[3],ymm0[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm14, %zmm14
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm15
+; AVX512BW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm20
+; AVX512BW-NEXT:    vpermi2q %zmm30, %zmm16, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm18 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm8
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[2],ymm0[2]
+; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm18, %zmm18
+; AVX512BW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm21
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm16[1],zmm30[1],zmm16[3],zmm30[3],zmm16[5],zmm30[5],zmm16[7],zmm30[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm20
+; AVX512BW-NEXT:    vpermi2q %zmm20, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm15, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm4, %zmm5
+; AVX512BW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm26
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm1 = zmm16[0],zmm30[0],zmm16[2],zmm30[2],zmm16[4],zmm30[4],zmm16[6],zmm30[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm1 {%k1}
+; AVX512BW-NEXT:    vpermi2q %zmm20, %zmm0, %zmm9
+; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm15, %zmm11
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm1, %zmm17
+; AVX512BW-NEXT:    vpermi2q %zmm20, %zmm0, %zmm12
+; AVX512BW-NEXT:    vpermi2q %zmm8, %zmm15, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vpermi2q %zmm30, %zmm16, %zmm22
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm22 {%k1} = zmm6[1],zmm19[1],zmm6[3],zmm19[3],zmm6[5],zmm19[5],zmm6[7],zmm19[7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm22, %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm30, %zmm16, %zmm27
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k1} = zmm6[0],zmm19[0],zmm6[2],zmm19[2],zmm6[4],zmm19[4],zmm6[6],zmm19[6]
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm31, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm23, %zmm15
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm27, %zmm3
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm4, %zmm0
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm8, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqa (%rcx), %ymm11
 ; AVX512BW-NEXT:    vmovdqa (%rdx), %ymm12
-; AVX512BW-NEXT:    vmovdqa 64(%rdx), %ymm13
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm14 = ymm12[1],ymm7[1],ymm12[3],ymm7[3]
-; AVX512BW-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %ymm23
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm25
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm26
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm25[1],ymm15[1],ymm25[3],ymm15[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm14[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm5
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [2,10,2,10,2,10,2,10]
-; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm31, %zmm10, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm6 {%k1}
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm12[0],ymm7[0],ymm12[2],ymm7[2]
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm25[0],ymm15[0],ymm25[2],ymm15[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm6, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm3, %zmm7
-; AVX512BW-NEXT:    vpermi2q %zmm27, %zmm16, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm3 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm13[1],ymm11[1],ymm13[3],ymm11[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm26[1],ymm23[1],ymm26[3],ymm23[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm3, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm10, %zmm30
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm10, %zmm16
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm16 {%k1}
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm13[0],ymm11[0],ymm13[2],ymm11[2]
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm26[0],ymm23[0],ymm26[2],ymm23[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm16, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 640(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 704(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 128(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, 896(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 960(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 768(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 832(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, 512(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, 576(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 384(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, 448(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, 256(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 320(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, (%rax)
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm11[1],ymm12[3],ymm11[3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm15, %zmm7
+; AVX512BW-NEXT:    vmovdqa (%rsi), %ymm10
+; AVX512BW-NEXT:    vpermi2q %zmm19, %zmm6, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm15, %zmm6
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm15
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm15[1],ymm10[1],ymm15[3],ymm10[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm0
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm2, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm29 {%k1}
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm15[0],ymm10[0],ymm15[2],ymm10[2]
+; AVX512BW-NEXT:    vmovdqa 64(%rcx), %ymm10
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm9[2,3],ymm7[2,3]
+; AVX512BW-NEXT:    vmovdqa 64(%rdx), %ymm9
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm29, %zmm7
+; AVX512BW-NEXT:    vmovdqa 64(%rsi), %ymm11
+; AVX512BW-NEXT:    vpermi2q %zmm30, %zmm16, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm4
+; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm2, %zmm16
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm9[1],ymm10[1],ymm9[3],ymm10[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm4[1],ymm11[1],ymm4[3],ymm11[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm8, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm16 {%k1}
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm9[0],ymm10[0],ymm9[2],ymm10[2]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm11[0],ymm4[2],ymm11[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm6[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm16, %zmm4
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 640(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 704(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 896(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 960(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, 768(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 832(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 512(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, 576(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, 384(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, 448(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm28, 256(%rax)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, 320(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512BW-NEXT:    addq $72, %rsp
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <16 x i64>, ptr %in.vecptr0, align 64
@@ -4142,457 +4175,472 @@ define void @store_i64_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-LABEL: store_i64_stride8_vf32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $2632, %rsp # imm = 0xA48
+; AVX512F-NEXT:    subq $2504, %rsp # imm = 0x9C8
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-NEXT:    vmovaps 128(%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm3
+; AVX512F-NEXT:    vmovaps 192(%rdx), %zmm0
 ; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 128(%rsi), %zmm25
-; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm0
-; AVX512F-NEXT:    vmovaps 192(%rdx), %zmm2
-; AVX512F-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%rdx), %zmm13
-; AVX512F-NEXT:    vmovdqa64 64(%rdx), %zmm2
-; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm6
-; AVX512F-NEXT:    vmovdqa64 64(%rcx), %zmm21
-; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm7
-; AVX512F-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512F-NEXT:    vmovdqa64 64(%r8), %zmm19
-; AVX512F-NEXT:    vmovdqa64 (%r9), %zmm18
-; AVX512F-NEXT:    vmovdqa64 64(%r9), %zmm28
-; AVX512F-NEXT:    vmovdqa64 (%r10), %zmm17
-; AVX512F-NEXT:    vmovdqa64 64(%r10), %zmm16
-; AVX512F-NEXT:    vmovdqa64 (%rax), %zmm24
-; AVX512F-NEXT:    vmovdqa64 64(%rax), %zmm22
+; AVX512F-NEXT:    vmovaps 128(%rdx), %zmm0
+; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 64(%rdx), %zmm9
+; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm4
+; AVX512F-NEXT:    vmovdqa64 128(%rcx), %zmm8
+; AVX512F-NEXT:    vmovdqa64 64(%rcx), %zmm1
+; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm0
+; AVX512F-NEXT:    vmovdqa64 (%r8), %zmm24
+; AVX512F-NEXT:    vmovdqa64 (%r9), %zmm28
+; AVX512F-NEXT:    vmovdqa64 (%r10), %zmm15
+; AVX512F-NEXT:    vmovdqa64 (%rax), %zmm19
 ; AVX512F-NEXT:    movb $-64, %r11b
 ; AVX512F-NEXT:    kmovw %r11d, %k1
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [4,12,4,12,4,12,4,12]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm15, %zmm9
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm9
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [4,12,4,12]
-; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm12, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [5,13,5,13,5,13,5,13]
-; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm8, %zmm9
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm23[1],zmm18[1],zmm23[3],zmm18[3],zmm23[5],zmm18[5],zmm23[7],zmm18[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm8, %zmm9
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,5,13]
-; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm14, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [6,14,6,14,6,14,6,14]
-; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm3, %zmm9
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm17[0],zmm24[0],zmm17[2],zmm24[2],zmm17[4],zmm24[4],zmm17[6],zmm24[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm10
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [6,14,6,14]
-; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm4, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm9, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [7,15,7,15,7,15,7,15]
-; AVX512F-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm9
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm29, %zmm9
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm29, %zmm5
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,7,15]
-; AVX512F-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm9, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm15, %zmm5
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm6 = zmm19[0],zmm28[0],zmm19[2],zmm28[2],zmm19[4],zmm28[4],zmm19[6],zmm28[6]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [4,12,4,12,4,12,4,12]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm6, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm17
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm6 = zmm24[0],zmm28[0],zmm24[2],zmm28[2],zmm24[4],zmm28[4],zmm24[6],zmm28[6]
 ; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm15, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm12, %zmm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [4,12,4,12]
+; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,12]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm12, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm8, %zmm5
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm19[1],zmm28[1],zmm19[3],zmm28[3],zmm19[5],zmm28[5],zmm19[7],zmm28[7]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm6, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm18
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm24[1],zmm28[1],zmm24[3],zmm28[3],zmm24[5],zmm28[5],zmm24[7],zmm28[7]
 ; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm8, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm14, %zmm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm11 = [5,13]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm11, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm5
 ; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm3, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm4, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm3, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm7
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm16[0],zmm22[0],zmm16[2],zmm22[2],zmm16[4],zmm22[4],zmm16[6],zmm22[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%rcx), %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm29, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm29, %zmm3
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm16[1],zmm22[1],zmm16[3],zmm22[3],zmm16[5],zmm22[5],zmm16[7],zmm22[7]
-; AVX512F-NEXT:    vpermt2q %zmm21, %zmm0, %zmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm12, %zmm1
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [6,14,6,14,6,14,6,14]
+; AVX512F-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm20, %zmm5
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm15[0],zmm19[0],zmm15[2],zmm19[2],zmm15[4],zmm19[4],zmm15[6],zmm19[6]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,6,14]
+; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm11 = [6,14]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm11, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm22, %zmm5
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm15[1],zmm19[1],zmm15[3],zmm19[3],zmm15[5],zmm19[5],zmm15[7],zmm19[7]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [7,15,7,15]
+; AVX512F-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [7,15]
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm5, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [4,12,4,12]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm12, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm2
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm6, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm14, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm11, %zmm9
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm4, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm6, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm4, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm10, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm0, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 192(%rcx), %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm13, %zmm12
-; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm13, %zmm14
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm11, %zmm14
 ; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm13, %zmm4
+; AVX512F-NEXT:    vmovdqa64 192(%rcx), %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm14, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm15, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm8, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm25, %zmm29, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%r10), %zmm30
-; AVX512F-NEXT:    vmovdqa64 128(%rax), %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm15, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm8, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%r8), %zmm21
-; AVX512F-NEXT:    vmovdqa64 128(%r9), %zmm9
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm7, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-NEXT:    vmovdqa64 192(%r10), %zmm14
-; AVX512F-NEXT:    vmovdqa64 192(%rax), %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm15, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm15
-; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm8, %zmm2
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm14, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm14, %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm11, %zmm14
+; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 64(%r10), %zmm27
+; AVX512F-NEXT:    vmovdqa64 64(%rax), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm17, %zmm4
+; AVX512F-NEXT:    vmovdqa64 64(%r8), %zmm16
+; AVX512F-NEXT:    vmovdqa64 64(%r9), %zmm26
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm16[0],zmm26[0],zmm16[2],zmm26[2],zmm16[4],zmm26[4],zmm16[6],zmm26[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm10, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm16[1],zmm26[1],zmm16[3],zmm26[3],zmm16[5],zmm26[5],zmm16[7],zmm26[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = [5,13]
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm8, %zmm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm8
-; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 192(%r8), %zmm31
-; AVX512F-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm27
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm7, %zmm27
-; AVX512F-NEXT:    vpermi2q %zmm0, %zmm1, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,14]
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm6, %zmm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm20, %zmm3
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm27[0],zmm1[0],zmm27[2],zmm1[2],zmm27[4],zmm1[4],zmm27[6],zmm1[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm0, %zmm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm22, %zmm3
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm27[1],zmm1[1],zmm27[3],zmm1[3],zmm27[5],zmm1[5],zmm27[7],zmm1[7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 128(%rsi), %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm12, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm8, %zmm7
 ; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm29, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm20
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm29, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm29, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
-; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [1,9,1,9,1,9,1,9]
-; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm6, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm5
+; AVX512F-NEXT:    vmovdqa64 192(%rsi), %zmm2
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm12
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm31
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm21, %zmm31
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm23, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [2,10,2,10,2,10,2,10]
-; AVX512F-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm29, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,2,10,2,10,2,10]
 ; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm0, %zmm17
-; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm2, %zmm13
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm1, %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm29, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm18, %zmm0, %zmm23
-; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm2, %zmm26
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm23
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm29, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm0, %zmm16
-; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm2, %zmm19
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm29, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm0, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm2, %zmm15
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm16
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm1, %zmm16
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm30[0],zmm6[0],zmm30[2],zmm6[2],zmm30[4],zmm6[4],zmm30[6],zmm6[6]
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm20 {%k1} = zmm30[1],zmm6[1],zmm30[3],zmm6[3],zmm30[5],zmm6[5],zmm30[7],zmm6[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm29, %zmm5
-; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm0, %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm17
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm2, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm1, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm11
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm29, %zmm11
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm7 = zmm21[0],zmm9[0],zmm21[2],zmm9[2],zmm21[4],zmm9[4],zmm21[6],zmm9[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm8 = zmm21[1],zmm9[1],zmm21[3],zmm9[3],zmm21[5],zmm9[5],zmm21[7],zmm9[7]
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm2, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm25
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k1} = zmm14[0],zmm4[0],zmm14[2],zmm4[2],zmm14[4],zmm4[4],zmm14[6],zmm4[6]
-; AVX512F-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm14[1],zmm4[1],zmm14[3],zmm4[3],zmm14[5],zmm4[5],zmm14[7],zmm4[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm27
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm29, %zmm27
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm14
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm31, %zmm2
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm31, %zmm1
-; AVX512F-NEXT:    vpermi2q %zmm3, %zmm31, %zmm29
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm31[0],zmm3[0],zmm31[2],zmm3[2],zmm31[4],zmm3[4],zmm31[6],zmm3[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm31[1],zmm3[1],zmm31[3],zmm3[3],zmm31[5],zmm3[5],zmm31[7],zmm3[7]
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm0, %zmm31
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm4, %zmm15
+; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm23, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm27
+; AVX512F-NEXT:    vmovdqa64 128(%r10), %zmm19
+; AVX512F-NEXT:    vmovdqa64 128(%rax), %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm29
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm21, %zmm29
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm30
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm23, %zmm30
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm17, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 128(%r8), %zmm14
+; AVX512F-NEXT:    vmovdqa64 128(%r9), %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm18
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm20, %zmm18
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k1} = zmm19[0],zmm1[0],zmm19[2],zmm1[2],zmm19[4],zmm1[4],zmm19[6],zmm1[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm17 {%k1} = zmm19[1],zmm1[1],zmm19[3],zmm1[3],zmm19[5],zmm1[5],zmm19[7],zmm1[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
+; AVX512F-NEXT:    vmovdqa64 192(%r10), %zmm10
+; AVX512F-NEXT:    vmovdqa64 192(%rax), %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm10, %zmm21
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm10, %zmm23
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm10, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm10, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 192(%r8), %zmm5
+; AVX512F-NEXT:    vmovdqa64 192(%r9), %zmm0
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm5, %zmm20
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k1} = zmm10[0],zmm1[0],zmm10[2],zmm1[2],zmm10[4],zmm1[4],zmm10[6],zmm1[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm5, %zmm22
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm22 {%k1} = zmm10[1],zmm1[1],zmm10[3],zmm1[3],zmm10[5],zmm1[5],zmm10[7],zmm1[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm10, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm22
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm2, %zmm24
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm25
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm25
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm8, %zmm20
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm12, %zmm22
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm2, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm1, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm28
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm8, %zmm28
+; AVX512F-NEXT:    vpermt2q %zmm26, %zmm12, %zmm16
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm2, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm1, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm26
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm8, %zmm26
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm14[0],zmm3[0],zmm14[2],zmm3[2],zmm14[4],zmm3[4],zmm14[6],zmm3[6]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm14[1],zmm3[1],zmm14[3],zmm3[3],zmm14[5],zmm3[5],zmm14[7],zmm3[7]
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm12, %zmm14
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm5, %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm0, %zmm5, %zmm8
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm5[0],zmm0[0],zmm5[2],zmm0[2],zmm5[4],zmm0[4],zmm5[6],zmm0[6]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm15 = zmm5[1],zmm0[1],zmm5[3],zmm0[3],zmm5[5],zmm0[5],zmm5[7],zmm0[7]
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm12, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm24 {%k1}
 ; AVX512F-NEXT:    vmovdqa (%rsi), %xmm0
 ; AVX512F-NEXT:    vinserti128 $1, (%rcx), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-NEXT:    vinserti128 $1, (%rdx), %ymm3, %ymm3
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm28 = ymm3[0],ymm0[0],ymm3[2],ymm0[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm28, %zmm13, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm12 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm3[1],ymm0[1],ymm3[3],ymm0[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm12
+; AVX512F-NEXT:    vinserti128 $1, (%rdx), %ymm12, %ymm12
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm31 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm31, %zmm24, %zmm24
+; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm25 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm0[1],ymm12[3],ymm0[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm25, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm19 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
 ; AVX512F-NEXT:    vmovdqa 64(%rsi), %xmm0
 ; AVX512F-NEXT:    vinserti128 $1, 64(%rcx), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm12
 ; AVX512F-NEXT:    vinserti128 $1, 64(%rdx), %ymm12, %ymm12
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm19, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm18 {%k1}
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm24, %zmm9, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm11 {%k1}
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm0[1],ymm12[3],ymm0[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm18, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm15, %zmm17 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm7 {%k1}
 ; AVX512F-NEXT:    vmovdqa 128(%rsi), %xmm0
 ; AVX512F-NEXT:    vinserti128 $1, 128(%rcx), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa 128(%rdi), %xmm12
-; AVX512F-NEXT:    vinserti128 $1, 128(%rdx), %ymm12, %ymm13
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm13[0],ymm0[0],ymm13[2],ymm0[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm17, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm5 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm13[1],ymm0[1],ymm13[3],ymm0[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %xmm9
+; AVX512F-NEXT:    vinserti128 $1, 128(%rdx), %ymm9, %ymm9
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm9[0],ymm0[0],ymm9[2],ymm0[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm7, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm13 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm9[1],ymm0[1],ymm9[3],ymm0[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm13, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm28
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm8, %zmm26
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm5, %zmm0, %zmm19
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm31
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm20, %zmm18
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm2 {%k1}
-; AVX512F-NEXT:    vmovdqa 192(%rsi), %xmm7
-; AVX512F-NEXT:    vinserti128 $1, 192(%rcx), %ymm7, %ymm7
-; AVX512F-NEXT:    vmovdqa 192(%rdi), %xmm10
-; AVX512F-NEXT:    vinserti128 $1, 192(%rdx), %ymm10, %ymm10
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm10[0],ymm7[0],ymm10[2],ymm7[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm15, %zmm2, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm1 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm10[1],ymm7[1],ymm10[3],ymm7[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm1, %zmm1
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm18, %zmm30
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm29
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm2 {%k1}
+; AVX512F-NEXT:    vmovdqa 192(%rsi), %xmm0
+; AVX512F-NEXT:    vinserti128 $1, 192(%rcx), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa 192(%rdi), %xmm6
+; AVX512F-NEXT:    vinserti128 $1, 192(%rdx), %ymm6, %ymm6
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm2, %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm1 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm6[1],ymm0[1],ymm6[3],ymm0[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm24
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm7 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm4, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm23
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm4 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm15, %zmm1
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm3 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm3, %zmm0, %zmm3
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm6, %zmm0, %zmm10
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm11 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm11, %zmm0, %zmm11
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm9, %zmm15
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
+; AVX512F-NEXT:    vmovdqa (%rcx), %ymm15
+; AVX512F-NEXT:    vmovdqa64 (%rdx), %ymm17
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm17[0],ymm15[0],ymm17[2],ymm15[2]
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %ymm18
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm21
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm21[0],ymm18[0],ymm21[2],ymm18[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm12[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm20, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512F-NEXT:    vmovdqa (%rcx), %ymm6
-; AVX512F-NEXT:    vmovdqa64 (%rdx), %ymm16
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm16[0],ymm6[0],ymm16[2],ymm6[2]
-; AVX512F-NEXT:    vmovdqa64 (%rsi), %ymm20
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm23
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm23[0],ymm20[0],ymm23[2],ymm20[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm12[2,3],ymm3[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm24, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm17[1],ymm15[1],ymm17[3],ymm15[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm21[1],ymm18[1],ymm21[3],ymm18[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm15[2,3],ymm12[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm22, %zmm12
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm16[1],ymm6[1],ymm16[3],ymm6[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm23[1],ymm20[1],ymm23[3],ymm20[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm12[2,3],ymm6[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm2, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
+; AVX512F-NEXT:    vmovdqa 64(%rcx), %ymm15
+; AVX512F-NEXT:    vmovdqa64 64(%rdx), %ymm17
+; AVX512F-NEXT:    vmovdqa64 64(%rsi), %ymm18
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm20
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm17[0],ymm15[0],ymm17[2],ymm15[2]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm20[0],ymm18[0],ymm20[2],ymm18[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm13[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm28, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm16 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm17[1],ymm15[1],ymm17[3],ymm15[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm20[1],ymm18[1],ymm20[3],ymm18[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm16, %zmm13
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
-; AVX512F-NEXT:    vmovdqa 64(%rcx), %ymm12
-; AVX512F-NEXT:    vmovdqa64 64(%rdx), %ymm16
-; AVX512F-NEXT:    vmovdqa64 64(%rsi), %ymm20
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm16[0],ymm12[0],ymm16[2],ymm12[2]
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm23[0],ymm20[0],ymm23[2],ymm20[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm16[1],ymm12[1],ymm16[3],ymm12[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm23[1],ymm20[1],ymm23[3],ymm20[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm13[2,3],ymm12[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k1}
-; AVX512F-NEXT:    vmovdqa 128(%rcx), %ymm13
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
+; AVX512F-NEXT:    vmovdqa 128(%rcx), %ymm15
 ; AVX512F-NEXT:    vmovdqa64 128(%rdx), %ymm16
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm16[0],ymm13[0],ymm16[2],ymm13[2]
-; AVX512F-NEXT:    vmovdqa64 128(%rsi), %ymm20
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm22
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm22[0],ymm20[0],ymm22[2],ymm20[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm8[2,3],ymm5[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm11, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm21 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm16[1],ymm13[1],ymm16[3],ymm13[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm11 = ymm22[1],ymm20[1],ymm22[3],ymm20[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm11[2,3],ymm8[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm21, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm29 {%k1}
-; AVX512F-NEXT:    vmovdqa 192(%rcx), %ymm9
-; AVX512F-NEXT:    vmovdqa 192(%rdx), %ymm11
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm11[0],ymm9[0],ymm11[2],ymm9[2]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm16[0],ymm15[0],ymm16[2],ymm15[2]
+; AVX512F-NEXT:    vmovdqa64 128(%rsi), %ymm17
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm18
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm18[0],ymm17[0],ymm18[2],ymm17[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm9[2,3],ymm6[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm26, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm14 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm16[1],ymm15[1],ymm16[3],ymm15[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm18[1],ymm17[1],ymm18[3],ymm17[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm15[2,3],ymm9[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm14, %zmm9
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqa 192(%rcx), %ymm14
+; AVX512F-NEXT:    vmovdqa 192(%rdx), %ymm15
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
 ; AVX512F-NEXT:    vmovdqa64 192(%rsi), %ymm16
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %ymm20
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm20[0],ymm16[0],ymm20[2],ymm16[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm2[2,3],ymm13[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm29, %zmm2
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm31 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm11[1],ymm9[1],ymm11[3],ymm9[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm11 = ymm20[1],ymm16[1],ymm20[3],ymm16[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm11[2,3],ymm9[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm31, %zmm9
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %ymm17
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm17[0],ymm16[0],ymm17[2],ymm16[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm8, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm17[1],ymm16[1],ymm17[3],ymm16[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm5, %zmm2
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 1664(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, 1216(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, 1152(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm12, 704(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm0, 640(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 192(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm15, 1984(%rax)
-; AVX512F-NEXT:    vmovaps %zmm10, 1920(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 1856(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 1792(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, 1536(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm18, 1472(%rax)
-; AVX512F-NEXT:    vmovaps %zmm19, 1408(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm28, 1280(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 1728(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, 1664(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, 1216(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 1152(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 640(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 128(%rax)
+; AVX512F-NEXT:    vmovaps %zmm11, 1984(%rax)
+; AVX512F-NEXT:    vmovaps %zmm3, 1920(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 1856(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, 1792(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm24, 1600(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm25, 1536(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm29, 1472(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm30, 1408(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm31, 1344(%rax)
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, 1280(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 1088(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -4621,463 +4669,478 @@ define void @store_i64_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512F-NEXT:    addq $2632, %rsp # imm = 0xA48
+; AVX512F-NEXT:    addq $2504, %rsp # imm = 0x9C8
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_i64_stride8_vf32:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $2632, %rsp # imm = 0xA48
+; AVX512BW-NEXT:    subq $2504, %rsp # imm = 0x9C8
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT:    vmovaps 128(%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm3
+; AVX512BW-NEXT:    vmovaps 192(%rdx), %zmm0
 ; AVX512BW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm25
-; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm0
-; AVX512BW-NEXT:    vmovaps 192(%rdx), %zmm2
-; AVX512BW-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%rdx), %zmm13
-; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm18
-; AVX512BW-NEXT:    vmovdqa64 64(%r9), %zmm28
-; AVX512BW-NEXT:    vmovdqa64 (%r10), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 64(%r10), %zmm16
-; AVX512BW-NEXT:    vmovdqa64 (%rax), %zmm24
-; AVX512BW-NEXT:    vmovdqa64 64(%rax), %zmm22
+; AVX512BW-NEXT:    vmovaps 128(%rdx), %zmm0
+; AVX512BW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 128(%rcx), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm24
+; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm28
+; AVX512BW-NEXT:    vmovdqa64 (%r10), %zmm15
+; AVX512BW-NEXT:    vmovdqa64 (%rax), %zmm19
 ; AVX512BW-NEXT:    movb $-64, %r11b
 ; AVX512BW-NEXT:    kmovd %r11d, %k1
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [4,12,4,12,4,12,4,12]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm15, %zmm9
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm23[0],zmm18[0],zmm23[2],zmm18[2],zmm23[4],zmm18[4],zmm23[6],zmm18[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm9
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [4,12,4,12]
-; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [5,13,5,13,5,13,5,13]
-; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm8, %zmm9
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm23[1],zmm18[1],zmm23[3],zmm18[3],zmm23[5],zmm18[5],zmm23[7],zmm18[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm9
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [5,13,5,13]
-; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm10, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [6,14,6,14,6,14,6,14]
-; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm3, %zmm9
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm17[0],zmm24[0],zmm17[2],zmm24[2],zmm17[4],zmm24[4],zmm17[6],zmm24[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm10
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [6,14,6,14]
-; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm9, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [7,15,7,15,7,15,7,15]
-; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm9
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm29, %zmm9
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm5
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [7,15,7,15]
-; AVX512BW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm9, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm5
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 = zmm19[0],zmm28[0],zmm19[2],zmm28[2],zmm19[4],zmm28[4],zmm19[6],zmm28[6]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [4,12,4,12,4,12,4,12]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm17
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 = zmm24[0],zmm28[0],zmm24[2],zmm28[2],zmm24[4],zmm28[4],zmm24[6],zmm28[6]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm15, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm12, %zmm7
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [4,12,4,12]
+; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,12]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm12, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm8, %zmm5
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm19[1],zmm28[1],zmm19[3],zmm28[3],zmm19[5],zmm28[5],zmm19[7],zmm28[7]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [5,13,5,13,5,13,5,13]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm18
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm24[1],zmm28[1],zmm24[3],zmm28[3],zmm24[5],zmm28[5],zmm24[7],zmm28[7]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm8, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm14, %zmm7
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [5,13]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm5
 ; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm3, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm3, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm16[0],zmm22[0],zmm16[2],zmm22[2],zmm16[4],zmm22[4],zmm16[6],zmm22[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%rcx), %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm29, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm29, %zmm3
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm16[1],zmm22[1],zmm16[3],zmm22[3],zmm16[5],zmm22[5],zmm16[7],zmm22[7]
-; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm2
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm12, %zmm1
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [6,14,6,14,6,14,6,14]
+; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm20, %zmm5
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm15[0],zmm19[0],zmm15[2],zmm19[2],zmm15[4],zmm19[4],zmm15[6],zmm19[6]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,6,14]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm6
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [6,14]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm22 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm22, %zmm5
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm15[1],zmm19[1],zmm15[3],zmm19[3],zmm15[5],zmm19[5],zmm15[7],zmm19[7]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [7,15,7,15]
+; AVX512BW-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [7,15]
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm5, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [4,12,4,12]
+; AVX512BW-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm4, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm6, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 192(%rcx), %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm13, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm13, %zmm14
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm14, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm14, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm14, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm14
 ; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm13, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 64(%r10), %zmm27
+; AVX512BW-NEXT:    vmovdqa64 64(%rax), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 64(%r9), %zmm26
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm16[0],zmm26[0],zmm16[2],zmm26[2],zmm16[4],zmm26[4],zmm16[6],zmm26[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm10, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 = zmm16[1],zmm26[1],zmm16[3],zmm26[3],zmm16[5],zmm26[5],zmm16[7],zmm26[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [5,13]
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm8, %zmm3
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm4, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,14]
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm2
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm20, %zmm3
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 {%k1} = zmm27[0],zmm1[0],zmm27[2],zmm1[2],zmm27[4],zmm1[4],zmm27[6],zmm1[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm13
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm22, %zmm3
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm27[1],zmm1[1],zmm27[3],zmm1[3],zmm27[5],zmm1[5],zmm27[7],zmm1[7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm12, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm15, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm8, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm29, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%r10), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 128(%rax), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm8, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%r8), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 128(%r9), %zmm9
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm7, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 192(%r10), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 192(%rax), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm0
-; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm2
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm31
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm21, %zmm31
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm23 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm23 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm23, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm4, %zmm15
 ; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm8, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm8
-; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 192(%r8), %zmm31
-; AVX512BW-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm27
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm27
-; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm20
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 128(%r10), %zmm19
+; AVX512BW-NEXT:    vmovdqa64 128(%rax), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm29
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm23, %zmm30
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 128(%r8), %zmm14
+; AVX512BW-NEXT:    vmovdqa64 128(%r9), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm18
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm20, %zmm18
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm18 {%k1} = zmm19[0],zmm1[0],zmm19[2],zmm1[2],zmm19[4],zmm1[4],zmm19[6],zmm1[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm17 {%k1} = zmm19[1],zmm1[1],zmm19[3],zmm1[3],zmm19[5],zmm1[5],zmm19[7],zmm1[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 192(%r10), %zmm10
+; AVX512BW-NEXT:    vmovdqa64 192(%rax), %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm10, %zmm21
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm10, %zmm23
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm10, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm10, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 192(%r8), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 192(%r9), %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm20
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k1} = zmm10[0],zmm1[0],zmm10[2],zmm1[2],zmm10[4],zmm1[4],zmm10[6],zmm1[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm22
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm22 {%k1} = zmm10[1],zmm1[1],zmm10[3],zmm1[3],zmm10[5],zmm1[5],zmm10[7],zmm1[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm10, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
 ; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm22
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm2, %zmm24
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [1,9,1,9,1,9,1,9]
 ; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [2,10,2,10,2,10,2,10]
-; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm29, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [3,11,3,11,3,11,3,11]
-; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm2, %zmm13
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm1, %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm29, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm23
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm29, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm16
-; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm2, %zmm19
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm29, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm16
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm16
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm30[0],zmm6[0],zmm30[2],zmm6[2],zmm30[4],zmm6[4],zmm30[6],zmm6[6]
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm20 {%k1} = zmm30[1],zmm6[1],zmm30[3],zmm6[3],zmm30[5],zmm6[5],zmm30[7],zmm6[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm5
-; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm30
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm17
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm11
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm11
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 = zmm21[0],zmm9[0],zmm21[2],zmm9[2],zmm21[4],zmm9[4],zmm21[6],zmm9[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm8 = zmm21[1],zmm9[1],zmm21[3],zmm9[3],zmm21[5],zmm9[5],zmm21[7],zmm9[7]
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm25
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm25
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm27 {%k1} = zmm14[0],zmm4[0],zmm14[2],zmm4[2],zmm14[4],zmm4[4],zmm14[6],zmm4[6]
-; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm9 {%k1} = zmm14[1],zmm4[1],zmm14[3],zmm4[3],zmm14[5],zmm4[5],zmm14[7],zmm4[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm27
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm27
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm14
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm31, %zmm2
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm31, %zmm1
-; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm31, %zmm29
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm31[0],zmm3[0],zmm31[2],zmm3[2],zmm31[4],zmm3[4],zmm31[6],zmm3[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm31[1],zmm3[1],zmm31[3],zmm3[3],zmm31[5],zmm3[5],zmm31[7],zmm3[7]
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm31
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm25
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm8, %zmm20
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm12, %zmm22
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm2, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm28
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm8, %zmm28
+; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm12, %zmm16
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm26
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm26
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm14[0],zmm3[0],zmm14[2],zmm3[2],zmm14[4],zmm3[4],zmm14[6],zmm3[6]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm14[1],zmm3[1],zmm14[3],zmm3[3],zmm14[5],zmm3[5],zmm14[7],zmm3[7]
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm12, %zmm14
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm8
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm5[0],zmm0[0],zmm5[2],zmm0[2],zmm5[4],zmm0[4],zmm5[6],zmm0[6]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm15 = zmm5[1],zmm0[1],zmm5[3],zmm0[3],zmm5[5],zmm0[5],zmm5[7],zmm0[7]
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm12, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm24 {%k1}
 ; AVX512BW-NEXT:    vmovdqa (%rsi), %xmm0
 ; AVX512BW-NEXT:    vinserti128 $1, (%rcx), %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-NEXT:    vinserti128 $1, (%rdx), %ymm3, %ymm3
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm28 = ymm3[0],ymm0[0],ymm3[2],ymm0[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm28, %zmm13, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm12 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm3[1],ymm0[1],ymm3[3],ymm0[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm12
+; AVX512BW-NEXT:    vinserti128 $1, (%rdx), %ymm12, %ymm12
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm31 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm31, %zmm24, %zmm24
+; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm24 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm25 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm0[1],ymm12[3],ymm0[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm25, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm19 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 64(%rsi), %xmm0
 ; AVX512BW-NEXT:    vinserti128 $1, 64(%rcx), %ymm0, %ymm0
 ; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm12
 ; AVX512BW-NEXT:    vinserti128 $1, 64(%rdx), %ymm12, %ymm12
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm19, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm18 {%k1}
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm24 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm24, %zmm9, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm11 {%k1}
 ; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm12[1],ymm0[1],ymm12[3],ymm0[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm18, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm17 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm7 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 128(%rsi), %xmm0
 ; AVX512BW-NEXT:    vinserti128 $1, 128(%rcx), %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %xmm12
-; AVX512BW-NEXT:    vinserti128 $1, 128(%rdx), %ymm12, %ymm13
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm13[0],ymm0[0],ymm13[2],ymm0[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm17, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm5 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm13[1],ymm0[1],ymm13[3],ymm0[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %xmm9
+; AVX512BW-NEXT:    vinserti128 $1, 128(%rdx), %ymm9, %ymm9
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm9[0],ymm0[0],ymm9[2],ymm0[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm7, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm13 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm9[1],ymm0[1],ymm9[3],ymm0[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm13, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm28
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm8, %zmm26
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm5, %zmm0, %zmm19
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm31
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm5 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm20, %zmm18
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqa 192(%rsi), %xmm7
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rcx), %ymm7, %ymm7
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %xmm10
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rdx), %ymm10, %ymm10
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm15 = ymm10[0],ymm7[0],ymm10[2],ymm7[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm15, %zmm2, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm1 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm10[1],ymm7[1],ymm10[3],ymm7[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm18, %zmm30
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm29
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm2 {%k1}
+; AVX512BW-NEXT:    vmovdqa 192(%rsi), %xmm0
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rcx), %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %xmm6
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rdx), %ymm6, %ymm6
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm2, %zmm25
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm1 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm6[1],ymm0[1],ymm6[3],ymm0[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm24
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm7 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm4, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6 {%k1}
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm23
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm4 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm15, %zmm1
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm3 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm3, %zmm0, %zmm3
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm6, %zmm0, %zmm10
+; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm11 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm11, %zmm0, %zmm11
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm6 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm9, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
+; AVX512BW-NEXT:    vmovdqa (%rcx), %ymm15
+; AVX512BW-NEXT:    vmovdqa64 (%rdx), %ymm17
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm17[0],ymm15[0],ymm17[2],ymm15[2]
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %ymm18
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm21
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm21[0],ymm18[0],ymm21[2],ymm18[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm4[2,3],ymm12[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm20, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-NEXT:    vmovdqa (%rcx), %ymm6
-; AVX512BW-NEXT:    vmovdqa64 (%rdx), %ymm16
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm16[0],ymm6[0],ymm16[2],ymm6[2]
-; AVX512BW-NEXT:    vmovdqa64 (%rsi), %ymm20
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm23
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm23[0],ymm20[0],ymm23[2],ymm20[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm12[2,3],ymm3[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm24, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm17[1],ymm15[1],ymm17[3],ymm15[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm21[1],ymm18[1],ymm21[3],ymm18[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm15[2,3],ymm12[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm22, %zmm12
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm6 = ymm16[1],ymm6[1],ymm16[3],ymm6[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm23[1],ymm20[1],ymm23[3],ymm20[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm12[2,3],ymm6[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm2, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm28 {%k1}
+; AVX512BW-NEXT:    vmovdqa 64(%rcx), %ymm15
+; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %ymm17
+; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %ymm18
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm20
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm17[0],ymm15[0],ymm17[2],ymm15[2]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm20[0],ymm18[0],ymm20[2],ymm18[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm13[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm28, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm16 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm17[1],ymm15[1],ymm17[3],ymm15[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm20[1],ymm18[1],ymm20[3],ymm18[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm15[2,3],ymm13[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm16, %zmm13
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k1}
-; AVX512BW-NEXT:    vmovdqa 64(%rcx), %ymm12
-; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %ymm16
-; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %ymm20
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm16[0],ymm12[0],ymm16[2],ymm12[2]
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm23[0],ymm20[0],ymm23[2],ymm20[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm12 = ymm16[1],ymm12[1],ymm16[3],ymm12[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm23[1],ymm20[1],ymm23[3],ymm20[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm13[2,3],ymm12[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k1}
-; AVX512BW-NEXT:    vmovdqa 128(%rcx), %ymm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
+; AVX512BW-NEXT:    vmovdqa 128(%rcx), %ymm15
 ; AVX512BW-NEXT:    vmovdqa64 128(%rdx), %ymm16
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm16[0],ymm13[0],ymm16[2],ymm13[2]
-; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %ymm20
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %ymm22
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm22[0],ymm20[0],ymm22[2],ymm20[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm8[2,3],ymm5[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm11, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm21 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm16[1],ymm13[1],ymm16[3],ymm13[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm11 = ymm22[1],ymm20[1],ymm22[3],ymm20[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm11[2,3],ymm8[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm21, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm29 {%k1}
-; AVX512BW-NEXT:    vmovdqa 192(%rcx), %ymm9
-; AVX512BW-NEXT:    vmovdqa 192(%rdx), %ymm11
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm11[0],ymm9[0],ymm11[2],ymm9[2]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm16[0],ymm15[0],ymm16[2],ymm15[2]
+; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %ymm17
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %ymm18
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm18[0],ymm17[0],ymm18[2],ymm17[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm9[2,3],ymm6[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm26, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm14 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm16[1],ymm15[1],ymm16[3],ymm15[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm15 = ymm18[1],ymm17[1],ymm18[3],ymm17[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm15[2,3],ymm9[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm14, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqa 192(%rcx), %ymm14
+; AVX512BW-NEXT:    vmovdqa 192(%rdx), %ymm15
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
 ; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %ymm16
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %ymm20
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm20[0],ymm16[0],ymm20[2],ymm16[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm2[2,3],ymm13[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm29, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm31 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm11[1],ymm9[1],ymm11[3],ymm9[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm11 = ymm20[1],ymm16[1],ymm20[3],ymm16[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm11[2,3],ymm9[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm31, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %ymm17
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm17[0],ymm16[0],ymm17[2],ymm16[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm8, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm8 = ymm17[1],ymm16[1],ymm17[3],ymm16[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm5, %zmm2
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 1664(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, 1216(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 1152(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, 704(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, 640(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, 1984(%rax)
-; AVX512BW-NEXT:    vmovaps %zmm10, 1920(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 1856(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 1792(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 1536(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm18, 1472(%rax)
-; AVX512BW-NEXT:    vmovaps %zmm19, 1408(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm28, 1280(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 1728(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, 1664(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, 1216(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 1152(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 640(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 128(%rax)
+; AVX512BW-NEXT:    vmovaps %zmm11, 1984(%rax)
+; AVX512BW-NEXT:    vmovaps %zmm3, 1920(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 1856(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, 1792(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, 1600(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, 1536(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, 1472(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm30, 1408(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, 1344(%rax)
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, 1280(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1088(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -5106,7 +5169,7 @@ define void @store_i64_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 64(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512BW-NEXT:    addq $2632, %rsp # imm = 0xA48
+; AVX512BW-NEXT:    addq $2504, %rsp # imm = 0x9C8
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <32 x i64>, ptr %in.vecptr0, align 64
@@ -8878,899 +8941,936 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-LABEL: store_i64_stride8_vf64:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $5512, %rsp # imm = 0x1588
+; AVX512F-NEXT:    subq $5704, %rsp # imm = 0x1648
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512F-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512F-NEXT:    vmovdqa64 64(%rsi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm8
 ; AVX512F-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512F-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512F-NEXT:    vmovdqa64 (%rdx), %zmm9
+; AVX512F-NEXT:    vmovdqa64 64(%rcx), %zmm6
+; AVX512F-NEXT:    vmovdqa64 (%rcx), %zmm10
 ; AVX512F-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512F-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512F-NEXT:    vmovdqa64 128(%r8), %zmm23
+; AVX512F-NEXT:    vmovdqa64 64(%r8), %zmm2
 ; AVX512F-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512F-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512F-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512F-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512F-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512F-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512F-NEXT:    vmovdqa64 64(%rax), %zmm16
+; AVX512F-NEXT:    vmovdqa64 64(%r9), %zmm20
+; AVX512F-NEXT:    vmovdqa64 (%r10), %zmm16
+; AVX512F-NEXT:    vmovdqa64 64(%r10), %zmm24
+; AVX512F-NEXT:    vmovdqa64 (%rax), %zmm21
+; AVX512F-NEXT:    vmovdqa64 64(%rax), %zmm22
 ; AVX512F-NEXT:    movb $-64, %r11b
 ; AVX512F-NEXT:    kmovw %r11d, %k1
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512F-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512F-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [4,12,4,12,4,12,4,12]
+; AVX512F-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm30, %zmm0
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm11 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [4,12,4,12]
+; AVX512F-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm12, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm15
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,12]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm13, %zmm12
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512F-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm11, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm19
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm11 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm17
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13]
 ; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512F-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512F-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,13]
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512F-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512F-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512F-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512F-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm1, %zmm12
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512F-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512F-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512F-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512F-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512F-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [6,14,6,14,6,14,6,14]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm18
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm16[0],zmm21[0],zmm16[2],zmm21[2],zmm16[4],zmm21[4],zmm16[6],zmm21[6]
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,6,14]
+; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm14, %zmm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,14]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm1, %zmm12
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm0, %zmm11
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm25
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm11 {%k1} = zmm16[1],zmm21[1],zmm16[3],zmm21[3],zmm16[5],zmm21[5],zmm16[7],zmm21[7]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} ymm17 = [7,15,7,15]
+; AVX512F-NEXT:    # ymm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm17, %zmm9
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} xmm26 = [7,15]
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm26, %zmm7
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm11, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm30, %zmm7
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm2[0],zmm20[0],zmm2[2],zmm20[2],zmm2[4],zmm20[4],zmm2[6],zmm20[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm13, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm11
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm8, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm8 = zmm2[1],zmm20[1],zmm2[3],zmm20[3],zmm2[5],zmm20[5],zmm2[7],zmm20[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm13, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [5,13]
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm0, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm8, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm14, %zmm7
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = [6,14]
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm10, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm20, %zmm9
+; AVX512F-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm18, %zmm8
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm24[0],zmm22[0],zmm24[2],zmm22[2],zmm24[4],zmm22[4],zmm24[6],zmm22[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm8, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 128(%r8), %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm17, %zmm5
+; AVX512F-NEXT:    vmovdqa64 128(%r10), %zmm31
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm26, %zmm3
+; AVX512F-NEXT:    vmovdqa64 128(%rax), %zmm20
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm25, %zmm4
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm24[1],zmm22[1],zmm24[3],zmm22[3],zmm24[5],zmm22[5],zmm24[7],zmm22[7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm30, %zmm3
+; AVX512F-NEXT:    vmovdqa64 128(%r9), %zmm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm12[0],zmm2[0],zmm12[2],zmm2[2],zmm12[4],zmm2[4],zmm12[6],zmm2[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm23
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 128(%rdx), %zmm3
+; AVX512F-NEXT:    vmovdqa64 128(%rcx), %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 128(%rsi), %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm11, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm5, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm19, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm27
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm7 = zmm12[1],zmm23[1],zmm12[3],zmm23[3],zmm12[5],zmm23[5],zmm12[7],zmm23[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm13, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm15
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm0, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm14, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm10, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm18, %zmm7
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k1} = zmm31[0],zmm20[0],zmm31[2],zmm20[2],zmm31[4],zmm20[4],zmm31[6],zmm20[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm17, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm26, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm25, %zmm4
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm31[1],zmm20[1],zmm31[3],zmm20[3],zmm31[5],zmm20[5],zmm31[7],zmm20[7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 192(%r10), %zmm23
+; AVX512F-NEXT:    vmovdqa64 192(%rax), %zmm19
 ; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm30, %zmm3
+; AVX512F-NEXT:    vmovdqa64 192(%r8), %zmm0
+; AVX512F-NEXT:    vmovdqa64 192(%r9), %zmm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm0[0],zmm4[0],zmm0[2],zmm4[2],zmm0[4],zmm4[4],zmm0[6],zmm4[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa64 192(%rdx), %zmm3
+; AVX512F-NEXT:    vmovdqa64 192(%rcx), %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [4,12,4,12]
+; AVX512F-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm14, %zmm7
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 192(%rsi), %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm2, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm5, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm27, %zmm5
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm7 = zmm0[1],zmm10[1],zmm0[3],zmm10[3],zmm0[5],zmm10[5],zmm0[7],zmm10[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm15, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = [5,13]
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm12, %zmm9
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm13, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm15
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm11, %zmm7
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512F-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm18, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k1} = zmm23[0],zmm19[0],zmm23[2],zmm19[2],zmm23[4],zmm19[4],zmm23[6],zmm19[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm6, %zmm17, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm8, %zmm26, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm10, %zmm25, %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm25, %zmm29
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm23[1],zmm19[1],zmm23[3],zmm19[3],zmm23[5],zmm19[5],zmm23[7],zmm19[7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 256(%r10), %zmm18
+; AVX512F-NEXT:    vmovdqa64 256(%rax), %zmm13
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm30, %zmm3
 ; AVX512F-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512F-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512F-NEXT:    vmovdqa64 256(%r9), %zmm25
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm0[0],zmm25[0],zmm0[2],zmm25[2],zmm0[4],zmm25[4],zmm0[6],zmm25[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 256(%rdx), %zmm3
 ; AVX512F-NEXT:    vmovdqa64 256(%rcx), %zmm7
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm14, %zmm8
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 256(%rsi), %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,12]
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm8, %zmm5, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm27, %zmm5
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm8 = zmm1[1],zmm25[1],zmm1[3],zmm25[3],zmm1[5],zmm25[5],zmm1[7],zmm25[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm8 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,13]
+; AVX512F-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm6, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm12, %zmm10
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm8, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm15, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm11, %zmm8
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm2, %zmm8
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm18[0],zmm13[0],zmm18[2],zmm13[2],zmm18[4],zmm13[4],zmm18[6],zmm13[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm8, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm7, %zmm17, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm26, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm29, %zmm4
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm18[1],zmm13[1],zmm18[3],zmm13[3],zmm18[5],zmm13[5],zmm18[7],zmm13[7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 320(%rdx), %zmm3
+; AVX512F-NEXT:    vmovdqa64 320(%rcx), %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm14, %zmm7
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512F-NEXT:    vmovdqa64 320(%rsi), %zmm9
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm0, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm15, %zmm12
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm17, %zmm3
+; AVX512F-NEXT:    vmovdqa64 384(%rdx), %zmm1
+; AVX512F-NEXT:    vmovdqa64 384(%rcx), %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm6, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512F-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512F-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512F-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512F-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512F-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm17, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512F-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
+; AVX512F-NEXT:    vmovdqa64 448(%rcx), %zmm5
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm0, %zmm10
+; AVX512F-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm0, %zmm11
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm5, %zmm0, %zmm15
 ; AVX512F-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512F-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512F-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512F-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512F-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm5, %zmm17, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512F-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512F-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512F-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512F-NEXT:    vmovdqa64 320(%r10), %zmm6
+; AVX512F-NEXT:    vmovdqa64 320(%rax), %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm30, %zmm5
+; AVX512F-NEXT:    vmovdqa64 320(%r8), %zmm27
+; AVX512F-NEXT:    vmovdqa64 320(%r9), %zmm17
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm27[0],zmm17[0],zmm27[2],zmm17[2],zmm27[4],zmm17[4],zmm27[6],zmm17[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm10 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm10, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm5
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [5,13,5,13,5,13,5,13]
+; AVX512F-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm29, %zmm5
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm7 = zmm27[1],zmm17[1],zmm27[3],zmm17[3],zmm27[5],zmm17[5],zmm27[7],zmm17[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = [5,13]
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm10, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14]
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm8, %zmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm7
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [6,14,6,14,6,14,6,14]
+; AVX512F-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm11, %zmm7
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k1} = zmm6[0],zmm0[0],zmm6[2],zmm0[2],zmm6[4],zmm0[4],zmm6[6],zmm0[6]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512F-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm9, %zmm26, %zmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [7,15,7,15,7,15,7,15]
+; AVX512F-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm9, %zmm4
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm6[1],zmm0[1],zmm6[3],zmm0[3],zmm6[5],zmm0[5],zmm6[7],zmm0[7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512F-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 384(%rsi), %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm14, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512F-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm10, %zmm5
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm10, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512F-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm8, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm26, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %zmm1
+; AVX512F-NEXT:    vmovdqa64 448(%rsi), %zmm3
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm1, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm1, %zmm5
+; AVX512F-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm1, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm26, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [0,8,0,8,0,8,0,8]
 ; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512F-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512F-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm1, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512F-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm1, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm21, %zmm5, %zmm16
+; AVX512F-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm2, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm1, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm4, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm22, %zmm5, %zmm24
+; AVX512F-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm2, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512F-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512F-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm1, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
+; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm4, %zmm3
 ; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm20, %zmm5, %zmm31
+; AVX512F-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm4
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512F-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm2, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm1, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512F-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm7, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm19, %zmm5, %zmm23
+; AVX512F-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm3, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm1, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm7, %zmm4
+; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm13, %zmm5, %zmm18
+; AVX512F-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512F-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 384(%r10), %zmm19
+; AVX512F-NEXT:    vmovdqa64 384(%rax), %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512F-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512F-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512F-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512F-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512F-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512F-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512F-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512F-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm6
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm30, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm29, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
+; AVX512F-NEXT:    vmovdqa64 384(%r8), %zmm12
+; AVX512F-NEXT:    vmovdqa64 384(%r9), %zmm4
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm20
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm11, %zmm20
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k1} = zmm19[0],zmm0[0],zmm19[2],zmm0[2],zmm19[4],zmm0[4],zmm19[6],zmm0[6]
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm18
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm29
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm9, %zmm18
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm18 {%k1} = zmm19[1],zmm0[1],zmm19[3],zmm0[3],zmm19[5],zmm0[5],zmm19[7],zmm0[7]
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm8, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm19
+; AVX512F-NEXT:    vmovdqa64 448(%r10), %zmm16
+; AVX512F-NEXT:    vmovdqa64 448(%rax), %zmm1
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm16, %zmm7
+; AVX512F-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm16, %zmm6
+; AVX512F-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm16, %zmm30
+; AVX512F-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm16, %zmm3
+; AVX512F-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 448(%r8), %zmm9
+; AVX512F-NEXT:    vmovdqa64 448(%r9), %zmm3
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm9, %zmm11
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm11 {%k1} = zmm16[0],zmm1[0],zmm16[2],zmm1[2],zmm16[4],zmm1[4],zmm16[6],zmm1[6]
+; AVX512F-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm9, %zmm0
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm16[1],zmm1[1],zmm16[3],zmm1[3],zmm16[5],zmm1[5],zmm16[7],zmm1[7]
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermi2q %zmm1, %zmm16, %zmm8
+; AVX512F-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm16
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm30
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm15, %zmm30
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
+; AVX512F-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm29
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm14, %zmm29
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [2,10,2,10,2,10,2,10]
+; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm7
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm5, %zmm7
+; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [3,11,3,11,3,11,3,11]
+; AVX512F-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermt2q %zmm28, %zmm6, %zmm21
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm28
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm15, %zmm28
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm31
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm14, %zmm31
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm5, %zmm8
+; AVX512F-NEXT:    vpermt2q %zmm1, %zmm6, %zmm22
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm15, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm14, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm5, %zmm10
+; AVX512F-NEXT:    vpermt2q %zmm23, %zmm6, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm15, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm5, %zmm11
+; AVX512F-NEXT:    vpermt2q %zmm0, %zmm6, %zmm24
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm15, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm14, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm5, %zmm13
+; AVX512F-NEXT:    vpermt2q %zmm25, %zmm6, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm15, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm14, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm27, %zmm25
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm5, %zmm25
+; AVX512F-NEXT:    vpermt2q %zmm17, %zmm6, %zmm27
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm15, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm14, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm17
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm5, %zmm17
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm12[0],zmm4[0],zmm12[2],zmm4[2],zmm12[4],zmm4[4],zmm12[6],zmm4[6]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm1 = zmm12[1],zmm4[1],zmm12[3],zmm4[3],zmm12[5],zmm4[5],zmm12[7],zmm4[7]
+; AVX512F-NEXT:    vpermt2q %zmm4, %zmm6, %zmm12
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm9, %zmm15
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm9, %zmm14
+; AVX512F-NEXT:    vpermi2q %zmm3, %zmm9, %zmm5
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm9[0],zmm3[0],zmm9[2],zmm3[2],zmm9[4],zmm3[4],zmm9[6],zmm3[6]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm9[1],zmm3[1],zmm9[3],zmm3[3],zmm9[5],zmm3[5],zmm9[7],zmm3[7]
+; AVX512F-NEXT:    vpermt2q %zmm3, %zmm6, %zmm9
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm0 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm18, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
 ; AVX512F-NEXT:    vmovdqa (%rcx), %ymm0
 ; AVX512F-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512F-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm6[2,3],ymm2[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm7, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k1}
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm21, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
 ; AVX512F-NEXT:    vmovdqa 64(%rcx), %ymm0
 ; AVX512F-NEXT:    vmovdqa 64(%rdx), %ymm1
 ; AVX512F-NEXT:    vmovdqa 64(%rsi), %ymm2
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm6[2,3],ymm4[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm8, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm22 {%k1}
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
 ; AVX512F-NEXT:    vmovdqa 128(%rcx), %ymm0
 ; AVX512F-NEXT:    vmovdqa 128(%rdx), %ymm1
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512F-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512F-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm6[2,3],ymm2[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k1}
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm23, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
 ; AVX512F-NEXT:    vmovdqa 192(%rcx), %ymm0
 ; AVX512F-NEXT:    vmovdqa 192(%rdx), %ymm1
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512F-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm6[2,3],ymm2[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm11, %zmm2
 ; AVX512F-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm24 {%k1}
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm0
+; AVX512F-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
 ; AVX512F-NEXT:    vmovdqa 256(%rcx), %ymm0
 ; AVX512F-NEXT:    vmovdqa 256(%rdx), %ymm1
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512F-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512F-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
+; AVX512F-NEXT:    vmovdqa 256(%rdi), %ymm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm6[2,3],ymm2[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm20
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512F-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512F-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512F-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512F-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512F-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512F-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512F-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm26 {%k1}
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
 ; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm0, %zmm26, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
+; AVX512F-NEXT:    vmovdqa 320(%rcx), %ymm1
+; AVX512F-NEXT:    vmovdqa 320(%rdx), %ymm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512F-NEXT:    vmovdqa 320(%rsi), %ymm4
+; AVX512F-NEXT:    vmovdqa 320(%rdi), %ymm6
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm7[2,3],ymm3[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm25, %zmm7
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm13
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
+; AVX512F-NEXT:    vmovdqa 384(%rcx), %ymm1
+; AVX512F-NEXT:    vmovdqa 384(%rdx), %ymm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512F-NEXT:    vmovdqa 384(%rsi), %ymm4
+; AVX512F-NEXT:    vmovdqa 384(%rdi), %ymm6
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm8[2,3],ymm3[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm17, %zmm25
+; AVX512F-NEXT:    vmovdqa64 %zmm19, %zmm12 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm19
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512F-NEXT:    vmovdqa 448(%rcx), %ymm1
+; AVX512F-NEXT:    vmovdqa 448(%rdx), %ymm2
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512F-NEXT:    vmovdqa 448(%rsi), %ymm4
+; AVX512F-NEXT:    vmovdqa 448(%rdi), %ymm6
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm8[2,3],ymm3[2,3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm10
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm9 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm11
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm30 {%k1}
 ; AVX512F-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512F-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX512F-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm30, %zmm8
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm29 {%k1}
 ; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512F-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512F-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm28 {%k1}
+; AVX512F-NEXT:    vmovdqa 64(%rsi), %xmm2
+; AVX512F-NEXT:    vinserti128 $1, 64(%rcx), %ymm2, %ymm3
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm2
+; AVX512F-NEXT:    vinserti128 $1, 64(%rdx), %ymm2, %ymm4
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm2, %zmm28, %zmm2
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm31 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm3, %zmm31, %zmm3
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512F-NEXT:    vmovdqa 128(%rsi), %xmm4
+; AVX512F-NEXT:    vinserti128 $1, 128(%rcx), %ymm4, %ymm4
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %xmm5
+; AVX512F-NEXT:    vinserti128 $1, 128(%rdx), %ymm5, %ymm6
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm5, %zmm9, %zmm5
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512F-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512F-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512F-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm12 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm12, %zmm12
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm16 {%k1}
+; AVX512F-NEXT:    vmovdqa 192(%rsi), %xmm4
+; AVX512F-NEXT:    vinserti128 $1, 192(%rcx), %ymm4, %ymm4
+; AVX512F-NEXT:    vmovdqa 192(%rdi), %xmm6
+; AVX512F-NEXT:    vinserti128 $1, 192(%rdx), %ymm6, %ymm9
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm9[0],ymm4[0],ymm9[2],ymm4[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm6, %zmm16, %zmm6
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm17 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm9[1],ymm4[1],ymm9[3],ymm4[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm4, %zmm17, %zmm4
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512F-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512F-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512F-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k1}
+; AVX512F-NEXT:    vmovdqa 256(%rsi), %xmm9
+; AVX512F-NEXT:    vinserti128 $1, 256(%rcx), %ymm9, %ymm9
+; AVX512F-NEXT:    vmovdqa64 256(%rdi), %xmm16
+; AVX512F-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm16, %ymm16
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm17 = ymm16[0],ymm9[0],ymm16[2],ymm9[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm17, %zmm18, %zmm17
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512F-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512F-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512F-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512F-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm21 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm16[1],ymm9[1],ymm16[3],ymm9[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm21, %zmm18
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k1}
+; AVX512F-NEXT:    vmovdqa 320(%rsi), %xmm9
+; AVX512F-NEXT:    vinserti128 $1, 320(%rcx), %ymm9, %ymm9
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %xmm16
+; AVX512F-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm16, %ymm16
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm21 = ymm16[0],ymm9[0],ymm16[2],ymm9[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm21, %zmm22, %zmm21
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512F-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512F-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512F-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512F-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512F-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512F-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512F-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512F-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512F-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512F-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512F-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512F-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm23 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm16[1],ymm9[1],ymm16[3],ymm9[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm9, %zmm23, %zmm9
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm24 {%k1}
+; AVX512F-NEXT:    vmovdqa64 384(%rsi), %xmm16
+; AVX512F-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm16, %ymm16
+; AVX512F-NEXT:    vmovdqa64 384(%rdi), %xmm22
+; AVX512F-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm22, %ymm22
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm22[0],ymm16[0],ymm22[2],ymm16[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm23, %zmm24, %zmm23
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm24, %zmm26 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm16 = ymm22[1],ymm16[1],ymm22[3],ymm16[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm16, %zmm26, %zmm16
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k1}
+; AVX512F-NEXT:    vmovdqa64 448(%rsi), %xmm22
+; AVX512F-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm22, %ymm22
+; AVX512F-NEXT:    vmovdqa64 448(%rdi), %xmm24
+; AVX512F-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm24, %ymm24
+; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} ymm26 = ymm24[0],ymm22[0],ymm24[2],ymm22[2]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm26, %zmm15, %zmm15
+; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-NEXT:    vmovdqa64 %zmm26, %zmm14 {%k1}
+; AVX512F-NEXT:    vpunpckhqdq {{.*#+}} ymm22 = ymm24[1],ymm22[1],ymm24[3],ymm22[3]
+; AVX512F-NEXT:    vinserti64x4 $0, %ymm22, %zmm14, %zmm14
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm11, 3776(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, 3712(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm19, 3264(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm25, 3200(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm13, 2752(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm7, 2688(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, 2240(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm20, 2176(%rax)
+; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovaps %zmm0, 1728(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 1664(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512F-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 1152(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 704(%rax)
@@ -9788,8 +9888,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 3904(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm14, 3648(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm15, 3584(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 3520(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9798,8 +9898,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 3392(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm16, 3136(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm23, 3072(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 3008(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9808,8 +9908,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 2880(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm9, 2624(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm21, 2560(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 2496(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9818,8 +9918,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 2368(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 2112(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, 2048(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 1984(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9828,8 +9928,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 1856(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm4, 1600(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm6, 1536(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 1472(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9838,8 +9938,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm12, 1088(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 1024(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9848,8 +9948,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 832(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm10, 512(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm3, 576(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, 512(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9858,907 +9958,944 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovaps %zmm0, 320(%rax)
 ; AVX512F-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512F-NEXT:    addq $5512, %rsp # imm = 0x1588
+; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512F-NEXT:    addq $5704, %rsp # imm = 0x1648
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_i64_stride8_vf64:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    subq $5512, %rsp # imm = 0x1588
+; AVX512BW-NEXT:    subq $5704, %rsp # imm = 0x1648
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm8
 ; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm10
 ; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512BW-NEXT:    vmovdqa64 128(%r8), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm2
 ; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512BW-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512BW-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512BW-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512BW-NEXT:    vmovdqa64 64(%rax), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 64(%r9), %zmm20
+; AVX512BW-NEXT:    vmovdqa64 (%r10), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 64(%r10), %zmm24
+; AVX512BW-NEXT:    vmovdqa64 (%rax), %zmm21
+; AVX512BW-NEXT:    vmovdqa64 64(%rax), %zmm22
 ; AVX512BW-NEXT:    movb $-64, %r11b
 ; AVX512BW-NEXT:    kmovd %r11d, %k1
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512BW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512BW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm30 = [4,12,4,12,4,12,4,12]
+; AVX512BW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm30, %zmm0
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm11 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [4,12,4,12]
+; AVX512BW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm12, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm15
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,12]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm13, %zmm12
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512BW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [5,13,5,13,5,13,5,13]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm11, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm19
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm11 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm17
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [5,13,5,13]
 ; AVX512BW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512BW-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,13]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512BW-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm12
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512BW-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [6,14,6,14,6,14,6,14]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm18
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm16[0],zmm21[0],zmm16[2],zmm21[2],zmm16[4],zmm21[4],zmm16[6],zmm21[6]
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [6,14,6,14]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm14, %zmm11
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [6,14]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm12
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm11, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm0, %zmm11
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm25
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm11 {%k1} = zmm16[1],zmm21[1],zmm16[3],zmm21[3],zmm16[5],zmm21[5],zmm16[7],zmm21[7]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm17 = [7,15,7,15]
+; AVX512BW-NEXT:    # ymm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm17, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = [7,15]
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm7
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm11, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm7
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm2[0],zmm20[0],zmm2[2],zmm20[2],zmm2[4],zmm20[4],zmm2[6],zmm20[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm11
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm8 = zmm2[1],zmm20[1],zmm2[3],zmm20[3],zmm2[5],zmm20[5],zmm2[7],zmm20[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm13, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [5,13]
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm14, %zmm7
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [6,14]
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm18, %zmm8
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm24[0],zmm22[0],zmm24[2],zmm22[2],zmm24[4],zmm22[4],zmm24[6],zmm22[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 128(%r8), %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm17, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 128(%r10), %zmm31
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm26, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 128(%rax), %zmm20
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm25, %zmm4
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm24[1],zmm22[1],zmm24[3],zmm22[3],zmm24[5],zmm22[5],zmm24[7],zmm22[7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm30, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 128(%r9), %zmm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm12[0],zmm2[0],zmm12[2],zmm2[2],zmm12[4],zmm2[4],zmm12[6],zmm2[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm23
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 128(%rdx), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 128(%rcx), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm19, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm27
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm7 = zmm12[1],zmm23[1],zmm12[3],zmm23[3],zmm12[5],zmm23[5],zmm12[7],zmm23[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm13, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm0, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm14, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm18, %zmm7
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k1} = zmm31[0],zmm20[0],zmm31[2],zmm20[2],zmm31[4],zmm20[4],zmm31[6],zmm20[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm17, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm25, %zmm4
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm31[1],zmm20[1],zmm31[3],zmm20[3],zmm31[5],zmm20[5],zmm31[7],zmm20[7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 192(%r10), %zmm23
+; AVX512BW-NEXT:    vmovdqa64 192(%rax), %zmm19
 ; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm30, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 192(%r8), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 192(%r9), %zmm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm0[0],zmm4[0],zmm0[2],zmm4[2],zmm0[4],zmm4[4],zmm0[6],zmm4[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 192(%rdx), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 192(%rcx), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [4,12,4,12]
+; AVX512BW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm14, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm27, %zmm5
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm7 = zmm0[1],zmm10[1],zmm0[3],zmm10[3],zmm0[5],zmm10[5],zmm0[7],zmm10[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [5,13]
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm12, %zmm9
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm13, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm11, %zmm7
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512BW-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm18, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k1} = zmm23[0],zmm19[0],zmm23[2],zmm19[2],zmm23[4],zmm19[4],zmm23[6],zmm19[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm17, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm26, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm25, %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm29
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm23[1],zmm19[1],zmm23[3],zmm19[3],zmm23[5],zmm19[5],zmm23[7],zmm19[7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 256(%r10), %zmm18
+; AVX512BW-NEXT:    vmovdqa64 256(%rax), %zmm13
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm30, %zmm3
 ; AVX512BW-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 256(%r9), %zmm25
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm0[0],zmm25[0],zmm0[2],zmm25[2],zmm0[4],zmm25[4],zmm0[6],zmm25[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
 ; AVX512BW-NEXT:    vmovdqa64 256(%rdx), %zmm3
 ; AVX512BW-NEXT:    vmovdqa64 256(%rcx), %zmm7
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm14, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 256(%rsi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,12]
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm8, %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm27, %zmm5
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm8 = zmm1[1],zmm25[1],zmm1[3],zmm25[3],zmm1[5],zmm25[5],zmm1[7],zmm25[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm8 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,13]
+; AVX512BW-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm12, %zmm10
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm8, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm2, %zmm8
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 {%k1} = zmm18[0],zmm13[0],zmm18[2],zmm13[2],zmm18[4],zmm13[4],zmm18[6],zmm13[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm8, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm17, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm26, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm29, %zmm4
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm18[1],zmm13[1],zmm18[3],zmm13[3],zmm18[5],zmm13[5],zmm18[7],zmm13[7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 320(%rdx), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 320(%rcx), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 320(%rsi), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm0, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm12
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm17, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 384(%rdx), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 384(%rcx), %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512BW-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512BW-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm17, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
+; AVX512BW-NEXT:    vmovdqa64 448(%rcx), %zmm5
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm0, %zmm10
+; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm0, %zmm11
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm0, %zmm15
 ; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512BW-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512BW-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512BW-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm17, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512BW-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 320(%r10), %zmm6
+; AVX512BW-NEXT:    vmovdqa64 320(%rax), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm30, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 320(%r8), %zmm27
+; AVX512BW-NEXT:    vmovdqa64 320(%r9), %zmm17
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm27[0],zmm17[0],zmm27[2],zmm17[2],zmm27[4],zmm17[4],zmm27[6],zmm17[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm10 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm10, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm5
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [5,13,5,13,5,13,5,13]
+; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm5
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm7 = zmm27[1],zmm17[1],zmm27[3],zmm17[3],zmm27[5],zmm17[5],zmm27[7],zmm17[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm10 = [5,13]
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14]
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm8, %zmm5
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm7
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [6,14,6,14,6,14,6,14]
+; AVX512BW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm11, %zmm7
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm7 {%k1} = zmm6[0],zmm0[0],zmm6[2],zmm0[2],zmm6[4],zmm0[4],zmm6[6],zmm0[6]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm7, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512BW-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm26, %zmm4
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [7,15,7,15,7,15,7,15]
+; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm4
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm4 {%k1} = zmm6[1],zmm0[1],zmm6[3],zmm0[3],zmm6[5],zmm0[5],zmm6[7],zmm0[7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 384(%rsi), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512BW-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm8, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm26, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 448(%rsi), %zmm3
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm1, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm26, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [0,8,0,8,0,8,0,8]
 ; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512BW-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm1, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm4
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm21, %zmm5, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm4, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm5, %zmm24
+; AVX512BW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
+; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm4, %zmm3
 ; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm5, %zmm31
+; AVX512BW-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm4
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512BW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm2, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm7, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm5, %zmm23
+; AVX512BW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm3, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm4
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm7, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm5, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 384(%r10), %zmm19
+; AVX512BW-NEXT:    vmovdqa64 384(%rax), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm7
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512BW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm30, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 384(%r8), %zmm12
+; AVX512BW-NEXT:    vmovdqa64 384(%r9), %zmm4
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm20
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm11, %zmm20
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm20 {%k1} = zmm19[0],zmm0[0],zmm19[2],zmm0[2],zmm19[4],zmm0[4],zmm19[6],zmm0[6]
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm18
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm29
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm18
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm18 {%k1} = zmm19[1],zmm0[1],zmm19[3],zmm0[3],zmm19[5],zmm0[5],zmm19[7],zmm0[7]
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm19
+; AVX512BW-NEXT:    vmovdqa64 448(%r10), %zmm16
+; AVX512BW-NEXT:    vmovdqa64 448(%rax), %zmm1
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm16, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm16, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm16, %zmm30
+; AVX512BW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm16, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 448(%r8), %zmm9
+; AVX512BW-NEXT:    vmovdqa64 448(%r9), %zmm3
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm11
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm11 {%k1} = zmm16[0],zmm1[0],zmm16[2],zmm1[2],zmm16[4],zmm1[4],zmm16[6],zmm1[6]
+; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm0
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm0 {%k1} = zmm16[1],zmm1[1],zmm16[3],zmm1[3],zmm16[5],zmm1[5],zmm16[7],zmm1[7]
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm16, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm16
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
+; AVX512BW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm30
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm15, %zmm30
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
+; AVX512BW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm29
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm14, %zmm29
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [2,10,2,10,2,10,2,10]
+; AVX512BW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm7
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm5, %zmm7
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [3,11,3,11,3,11,3,11]
+; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpermt2q %zmm28, %zmm6, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm28
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm15, %zmm28
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm31
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm31
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm8
+; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm22
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm15, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm14, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm5, %zmm10
+; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm6, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm1
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm11
+; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm24
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm15, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm14, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm5, %zmm13
+; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm6, %zmm26
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm15, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm14, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm5, %zmm25
+; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm6, %zmm27
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm17
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm5, %zmm17
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm12[0],zmm4[0],zmm12[2],zmm4[2],zmm12[4],zmm4[4],zmm12[6],zmm4[6]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm1 = zmm12[1],zmm4[1],zmm12[3],zmm4[3],zmm12[5],zmm4[5],zmm12[7],zmm4[7]
+; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm12
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm15
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm14
+; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm9, %zmm5
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm9[0],zmm3[0],zmm9[2],zmm3[2],zmm9[4],zmm3[4],zmm9[6],zmm3[6]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 = zmm9[1],zmm3[1],zmm9[3],zmm3[3],zmm9[5],zmm3[5],zmm9[7],zmm3[7]
+; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm6, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm0 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm18, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k1}
 ; AVX512BW-NEXT:    vmovdqa (%rcx), %ymm0
 ; AVX512BW-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512BW-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm6[2,3],ymm2[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm7, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k1}
 ; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm21, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 64(%rcx), %ymm0
 ; AVX512BW-NEXT:    vmovdqa 64(%rdx), %ymm1
 ; AVX512BW-NEXT:    vmovdqa 64(%rsi), %ymm2
 ; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm6[2,3],ymm4[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm8, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm22 {%k1}
 ; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
 ; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 128(%rcx), %ymm0
 ; AVX512BW-NEXT:    vmovdqa 128(%rdx), %ymm1
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512BW-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm6[2,3],ymm2[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k1}
 ; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm23, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 192(%rcx), %ymm0
 ; AVX512BW-NEXT:    vmovdqa 192(%rdx), %ymm1
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512BW-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm6[2,3],ymm2[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm11, %zmm2
 ; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm24 {%k1}
 ; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm0
+; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 256(%rcx), %ymm0
 ; AVX512BW-NEXT:    vmovdqa 256(%rdx), %ymm1
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; AVX512BW-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512BW-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
+; AVX512BW-NEXT:    vmovdqa 256(%rdi), %ymm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm6[2,3],ymm2[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm20
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512BW-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512BW-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512BW-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512BW-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512BW-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm26 {%k1}
 ; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm0, %zmm26, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
+; AVX512BW-NEXT:    vmovdqa 320(%rcx), %ymm1
+; AVX512BW-NEXT:    vmovdqa 320(%rdx), %ymm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512BW-NEXT:    vmovdqa 320(%rsi), %ymm4
+; AVX512BW-NEXT:    vmovdqa 320(%rdi), %ymm6
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm7[2,3],ymm3[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm25, %zmm7
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm13
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
+; AVX512BW-NEXT:    vmovdqa 384(%rcx), %ymm1
+; AVX512BW-NEXT:    vmovdqa 384(%rdx), %ymm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512BW-NEXT:    vmovdqa 384(%rsi), %ymm4
+; AVX512BW-NEXT:    vmovdqa 384(%rdi), %ymm6
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm8[2,3],ymm3[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm17, %zmm25
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm12 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm19
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa 448(%rcx), %ymm1
+; AVX512BW-NEXT:    vmovdqa 448(%rdx), %ymm2
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512BW-NEXT:    vmovdqa 448(%rsi), %ymm4
+; AVX512BW-NEXT:    vmovdqa 448(%rdi), %ymm6
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm8[2,3],ymm3[2,3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm5, %zmm10
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm9 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm11
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm30 {%k1}
 ; AVX512BW-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512BW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX512BW-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm30, %zmm8
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm29 {%k1}
 ; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512BW-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512BW-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm28 {%k1}
+; AVX512BW-NEXT:    vmovdqa 64(%rsi), %xmm2
+; AVX512BW-NEXT:    vinserti128 $1, 64(%rcx), %ymm2, %ymm3
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm2
+; AVX512BW-NEXT:    vinserti128 $1, 64(%rdx), %ymm2, %ymm4
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm2, %zmm28, %zmm2
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm31 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm3, %zmm31, %zmm3
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqa 128(%rsi), %xmm4
+; AVX512BW-NEXT:    vinserti128 $1, 128(%rcx), %ymm4, %ymm4
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %xmm5
+; AVX512BW-NEXT:    vinserti128 $1, 128(%rdx), %ymm5, %ymm6
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm5, %zmm9, %zmm5
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512BW-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512BW-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512BW-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm12 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm12, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm16 {%k1}
+; AVX512BW-NEXT:    vmovdqa 192(%rsi), %xmm4
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rcx), %ymm4, %ymm4
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %xmm6
+; AVX512BW-NEXT:    vinserti128 $1, 192(%rdx), %ymm6, %ymm9
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm9[0],ymm4[0],ymm9[2],ymm4[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm6, %zmm16, %zmm6
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm17 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm9[1],ymm4[1],ymm9[3],ymm4[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm4, %zmm17, %zmm4
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512BW-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512BW-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512BW-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k1}
+; AVX512BW-NEXT:    vmovdqa 256(%rsi), %xmm9
+; AVX512BW-NEXT:    vinserti128 $1, 256(%rcx), %ymm9, %ymm9
+; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %xmm16
+; AVX512BW-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm16, %ymm16
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm17 = ymm16[0],ymm9[0],ymm16[2],ymm9[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm17, %zmm18, %zmm17
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512BW-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512BW-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512BW-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm21 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm16[1],ymm9[1],ymm16[3],ymm9[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm21, %zmm18
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k1}
+; AVX512BW-NEXT:    vmovdqa 320(%rsi), %xmm9
+; AVX512BW-NEXT:    vinserti128 $1, 320(%rcx), %ymm9, %ymm9
+; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %xmm16
+; AVX512BW-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm16, %ymm16
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm21 = ymm16[0],ymm9[0],ymm16[2],ymm9[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm21, %zmm22, %zmm21
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512BW-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512BW-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512BW-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512BW-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512BW-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512BW-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512BW-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512BW-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm23 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm16[1],ymm9[1],ymm16[3],ymm9[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm9, %zmm23, %zmm9
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm24 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 384(%rsi), %xmm16
+; AVX512BW-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm16, %ymm16
+; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %xmm22
+; AVX512BW-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm22, %ymm22
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm23 = ymm22[0],ymm16[0],ymm22[2],ymm16[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm23, %zmm24, %zmm23
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm26 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm16 = ymm22[1],ymm16[1],ymm22[3],ymm16[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm16, %zmm26, %zmm16
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 448(%rsi), %xmm22
+; AVX512BW-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm22, %ymm22
+; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %xmm24
+; AVX512BW-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm24, %ymm24
+; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} ymm26 = ymm24[0],ymm22[0],ymm24[2],ymm22[2]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm26, %zmm15, %zmm15
+; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm14 {%k1}
+; AVX512BW-NEXT:    vpunpckhqdq {{.*#+}} ymm22 = ymm24[1],ymm22[1],ymm24[3],ymm22[3]
+; AVX512BW-NEXT:    vinserti64x4 $0, %ymm22, %zmm14, %zmm14
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm11, 3776(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, 3712(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, 3264(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm25, 3200(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, 2752(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm7, 2688(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, 2240(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm20, 2176(%rax)
+; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovaps %zmm0, 1728(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1664(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1152(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 704(%rax)
@@ -10776,8 +10913,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 3904(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, 3648(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm15, 3584(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 3520(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -10786,8 +10923,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 3392(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm16, 3136(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm23, 3072(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 3008(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -10796,8 +10933,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 2880(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, 2624(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, 2560(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 2496(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -10806,8 +10943,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 2368(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, 2112(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm17, 2048(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1984(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -10816,8 +10953,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1856(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm4, 1600(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 1536(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1472(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -10826,8 +10963,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1344(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, 1088(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 1024(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 960(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -10836,8 +10973,8 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 832(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, 512(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, 576(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 512(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 448(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -10846,9 +10983,9 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-NEXT:    vmovaps %zmm0, 320(%rax)
 ; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512BW-NEXT:    addq $5512, %rsp # imm = 0x1588
+; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512BW-NEXT:    addq $5704, %rsp # imm = 0x1648
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <64 x i64>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index a4f21ea2a9154..10cd41bf25a69 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -684,7 +684,7 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
 ; AVX512F-FAST-NEXT:    vpermi2q %ymm3, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,2,4,6,0,2,4,6]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,2,4,0,0,2,4,0]
 ; AVX512F-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0,4,8],zero,zero,zero,zero,ymm0[1,5,9],zero,zero,zero,zero,ymm0[18,22,26],zero,zero,zero,zero,ymm0[19,23,27],zero,zero,zero,zero
@@ -777,7 +777,7 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28],zero,zero
 ; AVX512BW-FAST-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,2,4,6,0,2,4,6]
+; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,2,4,0,0,2,4,0]
 ; AVX512BW-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
 ; AVX512BW-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm1
 ; AVX512BW-FAST-NEXT:    movl $236730480, %ecx # imm = 0xE1C3870
@@ -7664,13 +7664,12 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride7_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $1256, %rsp # imm = 0x4E8
+; AVX512F-FAST-NEXT:    subq $1288, %rsp # imm = 0x508
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa %ymm1, %ymm14
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm8
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero,ymm8[29],zero,zero
+; AVX512F-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm13
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
@@ -7697,12 +7696,14 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,zero,zero,ymm0[18]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm1
@@ -7718,87 +7719,86 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm25
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm30
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm5
-; AVX512F-FAST-NEXT:    vmovdqu %ymm5, (%rsp) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
 ; AVX512F-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
-; AVX512F-FAST-NEXT:    vporq %ymm3, %ymm5, %ymm24
+; AVX512F-FAST-NEXT:    vporq %ymm3, %ymm5, %ymm23
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm3
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm9, %xmm19
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm6, %xmm28
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm9, %xmm19
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm9, %xmm24
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm29
 ; AVX512F-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm3
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm10
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm14
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm3
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm9
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm10, %xmm27
-; AVX512F-FAST-NEXT:    vpor %xmm5, %xmm9, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm27
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm14, %xmm9
+; AVX512F-FAST-NEXT:    vpor %xmm5, %xmm9, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm15
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm10
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm15, %xmm9
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm12
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm21
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm12
 ; AVX512F-FAST-NEXT:    vporq %xmm9, %xmm12, %xmm22
 ; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm7
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm13, %ymm20
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm14, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm2
 ; AVX512F-FAST-NEXT:    vpor %ymm7, %ymm2, %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm7
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero,zero,zero,ymm7[18]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm7
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[0,1,14],zero,ymm7[12,13,0,1,14,15],zero,ymm7[3,12,13,2,3,16],zero,ymm7[30,31,28,29,16,17],zero,ymm7[31,18,19,28,29,18],zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm7
 ; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm7
 ; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
 ; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm13
 ; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm13, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm9, %xmm2
 ; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm2, %xmm31
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm14
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm12
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm24, %xmm2
 ; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
 ; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm2
 ; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20],zero,zero
+; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm2 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22],zero,ymm2[20],zero,zero
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm11
@@ -7808,37 +7808,37 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
 ; AVX512F-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512F-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm19
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm30
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm30
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
 ; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm0[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [2,2,3,3,2,2,3,3]
-; AVX512F-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3]
+; AVX512F-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rax), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm6, %ymm0
 ; AVX512F-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm24
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
-; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
@@ -7855,7 +7855,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm28, %xmm1
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm8[8],xmm14[9],xmm8[9],xmm14[10],xmm8[10],xmm14[11],xmm8[11],xmm14[12],xmm8[12],xmm14[13],xmm8[13],xmm14[14],xmm8[14],xmm14[15],xmm8[15]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -7863,9 +7863,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm28
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm6, %xmm1
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm14[8],xmm0[9],xmm14[9],xmm0[10],xmm14[10],xmm0[11],xmm14[11],xmm0[12],xmm14[12],xmm0[13],xmm14[13],xmm0[14],xmm14[14],xmm0[15],xmm14[15]
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm0
@@ -7880,24 +7879,24 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %xmm0
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm29
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm6, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm6
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm4
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm10
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm30
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm1[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7910,7 +7909,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm16
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm4
 ; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
 ; AVX512F-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
@@ -7923,7 +7922,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm13[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm31, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm12[0],xmm8[1],xmm12[1],xmm8[2],xmm12[2],xmm8[3],xmm12[3],xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
 ; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm13
@@ -7931,44 +7930,44 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm8[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm12, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero,zero
+; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm9
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm23[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm14[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm11[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm31 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512F-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm14
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512F-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm3, %ymm14
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm13
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm2, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm1
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpor %ymm12, %ymm9, %ymm9
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm9, %zmm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm9, %zmm2
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
 ; AVX512F-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm9, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm9, %ymm6, %ymm0
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm0, %zmm7
-; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm10, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm4
-; AVX512F-FAST-NEXT:    vpor %ymm8, %ymm14, %ymm2
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm7
+; AVX512F-FAST-NEXT:    vpor %ymm8, %ymm10, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512F-FAST-NEXT:    vpor %ymm11, %ymm14, %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm0, %zmm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm5
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm5
 ; AVX512F-FAST-NEXT:    vpandq %ymm9, %ymm22, %ymm0
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm20, %zmm0
 ; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
@@ -8021,7 +8020,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm12
-; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
 ; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
@@ -8095,7 +8094,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, 64(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, 384(%rax)
-; AVX512F-FAST-NEXT:    addq $1256, %rsp # imm = 0x4E8
+; AVX512F-FAST-NEXT:    addq $1288, %rsp # imm = 0x508
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
index 257c9a2e4eaa5..1703dae60a6c2 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
@@ -5735,7 +5735,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-SLOW-LABEL: store_i8_stride8_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $648, %rsp # imm = 0x288
+; AVX512F-SLOW-NEXT:    subq $680, %rsp # imm = 0x2A8
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
@@ -5762,8 +5762,8 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa 48(%rax), %xmm5
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
@@ -5771,10 +5771,10 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%r9), %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa 48(%r9), %xmm7
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%r8), %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa 48(%r8), %xmm12
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,1,1,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm0[2,1,3,3,4,5,6,7]
@@ -5782,7 +5782,8 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm8
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm0[0,2,2,3,4,5,6,7]
@@ -5790,8 +5791,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm9
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm0[0,2,2,3,4,5,6,7]
@@ -5801,69 +5801,70 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[2,1,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[2,1,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm1
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm0[0,1,2,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm14, %ymm28
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm13, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r10), %xmm0
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm2, %ymm11
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm2, %ymm11
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %xmm2
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm14, %ymm31
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm13, %ymm31
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,0,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm24
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm28
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm22
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm23
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,1,1,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm20
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm21
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm18
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm20
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm12, %ymm5, %ymm30
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm30
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm29
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm12, %ymm5, %ymm23
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm24
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm21
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm22
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm5
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm12[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm12[2,1,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm14, %ymm13, %ymm19
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm12[0,1,2,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm12, %ymm13, %ymm17
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm7[2,1,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm13, %ymm12, %ymm19
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm7[0,1,2,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,7,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm7, %ymm12, %ymm18
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[0,0,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm10, %ymm4
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[0,0,2,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm7, %ymm4
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm7, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
@@ -5883,9 +5884,9 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rcx), %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa 16(%rcx), %xmm14
 ; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdx), %xmm12
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
@@ -5898,7 +5899,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa 16(%rax), %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm26
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm16
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm17
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
@@ -5907,19 +5908,20 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%r9), %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%r8), %xmm14
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[2,1,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 16(%r9), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa 16(%r8), %xmm15
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm25
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm27
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
@@ -5928,60 +5930,61 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm0[3,3,3,3]
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[3,3,3,3]
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm6, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm10, %ymm3
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-SLOW-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm4 = mem[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm5 = mem[2,1,3,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm6
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm3, %zmm6
-; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm4 = mem[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm10
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} zmm13 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm13, %zmm10
+; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm4 = mem[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm4
+; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,0,2,1,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vpandnq %zmm4, %zmm8, %zmm4
-; AVX512F-SLOW-NEXT:    vpandq %zmm8, %zmm5, %zmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm5
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vpandnq %zmm4, %zmm3, %zmm4
+; AVX512F-SLOW-NEXT:    vpandq %zmm3, %zmm5, %zmm5
 ; AVX512F-SLOW-NEXT:    movw $-21846, %ax # imm = 0xAAAA
 ; AVX512F-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-SLOW-NEXT:    vpord %zmm4, %zmm5, %zmm6 {%k1}
+; AVX512F-SLOW-NEXT:    vpord %zmm4, %zmm5, %zmm10 {%k1}
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm2, %ymm2
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm8, %ymm0
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm4 = mem[2,1,3,3,6,5,7,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[2,1,3,3,6,5,7,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm9
 ; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm0 = mem[0,0,2,1,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,2,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpshufd $96, (%rsp), %ymm2 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm28[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd $232, (%rsp), %ymm4 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm4 = mem[0,2,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpandnq %zmm0, %zmm8, %zmm0
-; AVX512F-SLOW-NEXT:    vpandq %zmm8, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512F-SLOW-NEXT:    vpord %zmm0, %zmm2, %zmm9 {%k1}
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -5996,154 +5999,155 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm11[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm31[2,1,3,3,6,5,7,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm11
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm11
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm24[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm22[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm11
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm28[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[0,2,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm21[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm20[0,2,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vpandnq %zmm0, %zmm8, %zmm0
-; AVX512F-SLOW-NEXT:    vpandq %zmm8, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpandq %zmm3, %zmm1, %zmm1
 ; AVX512F-SLOW-NEXT:    vpord %zmm0, %zmm1, %zmm11 {%k1}
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,6,5,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,5,7,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm12[8],xmm13[8],xmm12[9],xmm13[9],xmm12[10],xmm13[10],xmm12[11],xmm13[11],xmm12[12],xmm13[12],xmm12[13],xmm13[13],xmm12[14],xmm13[14],xmm12[15],xmm13[15]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,2,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm1, %ymm0, %ymm28
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[3,3,3,3]
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm7, %ymm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm5
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm4, %ymm4
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm6, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm30[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm29[2,1,3,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm3, %zmm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm23[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm19[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm17[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512F-SLOW-NEXT:    vpandnq %zmm5, %zmm8, %zmm5
-; AVX512F-SLOW-NEXT:    vpandq %zmm8, %zmm7, %zmm7
-; AVX512F-SLOW-NEXT:    vpord %zmm5, %zmm7, %zmm4 {%k1}
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm5, %ymm18
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm5
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm21
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm29[2,1,3,3,6,5,7,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm24[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm22[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm18[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpandq %zmm3, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpord %zmm0, %zmm6, %zmm4 {%k1}
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm5, %ymm22
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm14[8],xmm15[8],xmm14[9],xmm15[9],xmm14[10],xmm15[10],xmm14[11],xmm15[11],xmm14[12],xmm15[12],xmm14[13],xmm15[13],xmm14[14],xmm15[14],xmm14[15],xmm15[15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm5[2,1,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm18
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,0,2,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm6, %ymm2, %ymm21
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm2, %ymm22
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,1,3,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm7, %ymm2, %ymm26
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm5, %ymm7, %ymm19
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm5 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm10, %ymm14
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm5[0,1,2,3,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm10, %ymm15
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm5 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm13, %ymm10, %ymm17
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm5[0,1,2,3,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm5, %ymm10, %ymm20
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm6, %ymm7, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[0,0,2,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm8, %ymm14
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[0,0,2,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm12, %ymm8, %ymm17
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm7, %ymm8, %ymm20
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # xmm1 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm1[2,1,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm7, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rsi), %xmm10
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm13
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm13[8],xmm10[8],xmm13[9],xmm10[9],xmm13[10],xmm10[10],xmm13[11],xmm10[11],xmm13[12],xmm10[12],xmm13[13],xmm10[13],xmm13[14],xmm10[14],xmm13[15],xmm10[15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm5[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm13 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm7, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa 16(%rsi), %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm12
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm5[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm23 = xmm5[3,3,3,3]
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm23, %ymm13, %ymm13
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm23, %ymm12, %ymm12
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm5, %ymm23, %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm5, %zmm13
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm12
 ; AVX512F-SLOW-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm23 = mem[2,1,3,3,6,5,7,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm3, %zmm5
-; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm13 = mem[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm12, %zmm13, %zmm5
+; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm12 = mem[0,0,2,1,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm23 = mem[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm13, %zmm13
-; AVX512F-SLOW-NEXT:    vpandnq %zmm13, %zmm8, %zmm13
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm12, %zmm12
+; AVX512F-SLOW-NEXT:    vpandnq %zmm12, %zmm3, %zmm12
 ; AVX512F-SLOW-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm23 = mem[0,0,2,1,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm24 = mem[0,2,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm23
-; AVX512F-SLOW-NEXT:    vpandq %zmm8, %zmm23, %zmm23
-; AVX512F-SLOW-NEXT:    vpord %zmm13, %zmm23, %zmm5 {%k1}
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm23 = xmm12[3,3,3,3]
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm24 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[1,1,1,1]
+; AVX512F-SLOW-NEXT:    vpandq %zmm3, %zmm23, %zmm23
+; AVX512F-SLOW-NEXT:    vpord %zmm12, %zmm23, %zmm5 {%k1}
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm0[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm23 = xmm0[3,3,3,3]
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm24 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3],xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm13 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm23, %ymm13, %ymm13
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm6
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm12, %ymm24, %ymm12
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm23 = xmm10[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm24 = xmm10[3,3,3,3]
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm29 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[1,1,1,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
-; AVX512F-SLOW-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm13 = mem[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm23, %ymm12, %ymm12
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm24, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm23 = xmm7[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm24 = xmm7[3,3,3,3]
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm29 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm12 = mem[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm30 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm30 = mem[2,1,3,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm13, %zmm13
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm12, %zmm3, %zmm13
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm12, %zmm12
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm12
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm30 = xmm2[3,3,3,3]
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm31 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
@@ -6157,57 +6161,57 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm16 = mem[0,2,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm27, %zmm16
-; AVX512F-SLOW-NEXT:    vpandnq %zmm25, %zmm8, %zmm25
-; AVX512F-SLOW-NEXT:    vpandq %zmm8, %zmm16, %zmm16
-; AVX512F-SLOW-NEXT:    vpord %zmm25, %zmm16, %zmm13 {%k1}
+; AVX512F-SLOW-NEXT:    vpandnq %zmm25, %zmm3, %zmm25
+; AVX512F-SLOW-NEXT:    vpandq %zmm3, %zmm16, %zmm16
+; AVX512F-SLOW-NEXT:    vpord %zmm25, %zmm16, %zmm12 {%k1}
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm16 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm24[0],zero,zero,zero,xmm24[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm23, %ymm16, %ymm16
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm10, %ymm29, %ymm10
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm10, %zmm10
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm7, %ymm29, %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm7, %zmm7
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm16 = ymm28[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm18 = ymm18[2,1,3,3,6,5,7,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm16, %zmm16
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm10, %zmm3, %zmm16
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm21[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm7, %zmm13, %zmm16
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[0,0,2,1,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm18 = ymm22[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm10, %zmm10
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm7, %zmm7
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm18 = ymm26[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm19[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm18, %zmm7
-; AVX512F-SLOW-NEXT:    vpandnq %zmm10, %zmm8, %zmm10
-; AVX512F-SLOW-NEXT:    vpandq %zmm8, %zmm7, %zmm7
-; AVX512F-SLOW-NEXT:    vpord %zmm10, %zmm7, %zmm16 {%k1}
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm30[0],zero,zero,zero,xmm30[1],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm7, %ymm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm18, %zmm6
+; AVX512F-SLOW-NEXT:    vpandnq %zmm7, %zmm3, %zmm7
+; AVX512F-SLOW-NEXT:    vpandq %zmm3, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpord %zmm7, %zmm6, %zmm16 {%k1}
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm30[0],zero,zero,zero,xmm30[1],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm31, %ymm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm15[2,1,3,3,6,5,7,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm3, %zmm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm17[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm20[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm15[2,1,3,3,6,5,7,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm2
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm17[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm20[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm8[0,0,2,1,4,4,6,5]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpandnq %zmm2, %zmm8, %zmm1
-; AVX512F-SLOW-NEXT:    vpandq %zmm8, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpord %zmm1, %zmm0, %zmm7 {%k1}
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
+; AVX512F-SLOW-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpandq %zmm3, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vpord %zmm0, %zmm1, %zmm2 {%k1}
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm16, 192(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm13, 128(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, 128(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, 448(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, 384(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%rax)
-; AVX512F-SLOW-NEXT:    addq $648, %rsp # imm = 0x288
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 64(%rax)
+; AVX512F-SLOW-NEXT:    addq $680, %rsp # imm = 0x2A8
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -6369,22 +6373,22 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm12
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm9
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u>
 ; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm2
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,12,13,u,u,u,u,u,u,14,15,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm4
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
-; AVX512F-FAST-NEXT:    vpandnq %zmm19, %zmm0, %zmm19
-; AVX512F-FAST-NEXT:    vpandq %zmm0, %zmm21, %zmm21
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,12,13,u,u,u,u,u,u,14,15,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm3
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} zmm7 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm3 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} zmm2 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpandnq %zmm19, %zmm2, %zmm19
+; AVX512F-FAST-NEXT:    vpandq %zmm2, %zmm21, %zmm21
 ; AVX512F-FAST-NEXT:    movw $-21846, %ax # imm = 0xAAAA
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-FAST-NEXT:    vpord %zmm19, %zmm21, %zmm4 {%k1}
+; AVX512F-FAST-NEXT:    vpord %zmm19, %zmm21, %zmm3 {%k1}
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # xmm15 = xmm15[0],mem[0],xmm15[1],mem[1],xmm15[2],mem[2],xmm15[3],mem[3],xmm15[4],mem[4],xmm15[5],mem[5],xmm15[6],mem[6],xmm15[7],mem[7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm15, %ymm15
@@ -6394,110 +6398,110 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa 48(%rsi), %xmm15
 ; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm8
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3],xmm8[4],xmm15[4],xmm8[5],xmm15[5],xmm8[6],xmm15[6],xmm8[7],xmm15[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm6, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm0
 ; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm21 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
-; AVX512F-FAST-NEXT:    vinserti32x4 $1, %xmm1, %ymm21, %ymm1
+; AVX512F-FAST-NEXT:    vinserti32x4 $1, %xmm0, %ymm21, %ymm0
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm6, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm15[8],xmm8[9],xmm15[9],xmm8[10],xmm15[10],xmm8[11],xmm15[11],xmm8[12],xmm15[12],xmm8[13],xmm15[13],xmm8[14],xmm15[14],xmm8[15],xmm15[15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm8
-; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm15 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm15[8],xmm8[9],xmm15[9],xmm8[10],xmm15[10],xmm8[11],xmm15[11],xmm8[12],xmm15[12],xmm8[13],xmm15[13],xmm8[14],xmm15[14],xmm8[15],xmm15[15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm8
+; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm15 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm15, %ymm8
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm15
-; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm1 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3],xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm13
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm1
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm15
+; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm0 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm0 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3],xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm13
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm8
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm11, %xmm14
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3],xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm11, %xmm14
 ; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm21 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vinserti32x4 $1, %xmm14, %ymm21, %ymm14
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm11, %ymm11
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm11, %ymm11
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm11, %ymm11
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm11
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm1[8],xmm8[9],xmm1[9],xmm8[10],xmm1[10],xmm8[11],xmm1[11],xmm8[12],xmm1[12],xmm8[13],xmm1[13],xmm8[14],xmm1[14],xmm8[15],xmm1[15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm8
-; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm14 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm8
+; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm14 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm14, %ymm8
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm21
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm21
 ; AVX512F-FAST-NEXT:    vmovdqa 16(%rsi), %xmm8
 ; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm10
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm2
-; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm17 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512F-FAST-NEXT:    vinserti32x4 $1, %xmm2, %ymm17, %ymm2
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm8
-; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
+; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm17 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512F-FAST-NEXT:    vinserti32x4 $1, %xmm1, %ymm17, %ymm1
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm8
+; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm10, %ymm8
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm8, %zmm2
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm12[0],xmm9[1],xmm12[1],xmm9[2],xmm12[2],xmm9[3],xmm12[3],xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm8, %ymm9
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm8, %xmm7
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
 ; AVX512F-FAST-NEXT:    vpmovzxwq {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpandnq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpord %zmm7, %zmm8, %zmm6 {%k1}
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm15 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpandnq (%rsp), %zmm0, %zmm7 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpord %zmm7, %zmm8, %zmm15 {%k1}
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm20, %zmm3, %zmm11
-; AVX512F-FAST-NEXT:    vpandnq %zmm22, %zmm0, %zmm7
-; AVX512F-FAST-NEXT:    vpandq %zmm0, %zmm23, %zmm8
-; AVX512F-FAST-NEXT:    vpord %zmm7, %zmm8, %zmm11 {%k1}
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm24, %zmm3, %zmm14
-; AVX512F-FAST-NEXT:    vpandnq %zmm25, %zmm0, %zmm7
-; AVX512F-FAST-NEXT:    vpandq %zmm0, %zmm26, %zmm8
-; AVX512F-FAST-NEXT:    vpord %zmm7, %zmm8, %zmm14 {%k1}
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm27, %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vpandnq %zmm28, %zmm0, %zmm7
-; AVX512F-FAST-NEXT:    vpandq %zmm0, %zmm29, %zmm8
-; AVX512F-FAST-NEXT:    vpord %zmm7, %zmm8, %zmm1 {%k1}
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm30, %zmm3, %zmm2
-; AVX512F-FAST-NEXT:    vpandnq %zmm31, %zmm0, %zmm7
-; AVX512F-FAST-NEXT:    vpandq %zmm0, %zmm16, %zmm8
-; AVX512F-FAST-NEXT:    vpord %zmm7, %zmm8, %zmm2 {%k1}
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm19, %zmm3, %zmm5
-; AVX512F-FAST-NEXT:    vpandnq %zmm13, %zmm0, %zmm3
-; AVX512F-FAST-NEXT:    vpandq %zmm0, %zmm21, %zmm0
-; AVX512F-FAST-NEXT:    vpord %zmm3, %zmm0, %zmm5 {%k1}
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm8, %ymm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm6 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpandnq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpord %zmm5, %zmm8, %zmm6 {%k1}
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm15 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpandnq (%rsp), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpord %zmm5, %zmm8, %zmm15 {%k1}
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm20, %zmm7, %zmm11
+; AVX512F-FAST-NEXT:    vpandnq %zmm22, %zmm2, %zmm5
+; AVX512F-FAST-NEXT:    vpandq %zmm2, %zmm23, %zmm8
+; AVX512F-FAST-NEXT:    vpord %zmm5, %zmm8, %zmm11 {%k1}
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm24, %zmm7, %zmm14
+; AVX512F-FAST-NEXT:    vpandnq %zmm25, %zmm2, %zmm5
+; AVX512F-FAST-NEXT:    vpandq %zmm2, %zmm26, %zmm8
+; AVX512F-FAST-NEXT:    vpord %zmm5, %zmm8, %zmm14 {%k1}
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm27, %zmm7, %zmm0
+; AVX512F-FAST-NEXT:    vpandnq %zmm28, %zmm2, %zmm5
+; AVX512F-FAST-NEXT:    vpandq %zmm2, %zmm29, %zmm8
+; AVX512F-FAST-NEXT:    vpord %zmm5, %zmm8, %zmm0 {%k1}
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm30, %zmm7, %zmm1
+; AVX512F-FAST-NEXT:    vpandnq %zmm31, %zmm2, %zmm5
+; AVX512F-FAST-NEXT:    vpandq %zmm2, %zmm16, %zmm8
+; AVX512F-FAST-NEXT:    vpord %zmm5, %zmm8, %zmm1 {%k1}
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm19, %zmm7, %zmm4
+; AVX512F-FAST-NEXT:    vpandnq %zmm13, %zmm2, %zmm5
+; AVX512F-FAST-NEXT:    vpandq %zmm2, %zmm21, %zmm2
+; AVX512F-FAST-NEXT:    vpord %zmm5, %zmm2, %zmm4 {%k1}
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 128(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 320(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, 256(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, 448(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, 64(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rax)
 ; AVX512F-FAST-NEXT:    addq $392, %rsp # imm = 0x188
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 0fe759f3c4310..9d177513b0aa7 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -343,7 +343,8 @@ define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v4i32_17:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpslld $4, %xmm0, %xmm1
+; X64-AVX512DQ-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
   ret <4 x i32> %1
@@ -471,7 +472,8 @@ define <8 x i32> @mul_v8i32_17(<8 x i32> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v8i32_17:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512DQ-NEXT:    vpslld $4, %ymm0, %ymm1
+; X64-AVX512DQ-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <8 x i32> %a0, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
   ret <8 x i32> %1
@@ -606,7 +608,10 @@ define <4 x i32> @mul_v4i32_neg33(<4 x i32> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v4i32_neg33:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpslld $5, %xmm0, %xmm1
+; X64-AVX512DQ-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX512DQ-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <4 x i32> %a0, <i32 -33, i32 -33, i32 -33, i32 -33>
   ret <4 x i32> %1
@@ -768,7 +773,10 @@ define <8 x i32> @mul_v8i32_neg33(<8 x i32> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v8i32_neg33:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512DQ-NEXT:    vpslld $5, %ymm0, %ymm1
+; X64-AVX512DQ-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; X64-AVX512DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX512DQ-NEXT:    vpsubd %ymm0, %ymm1, %ymm0
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <8 x i32> %a0, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
   ret <8 x i32> %1
@@ -1118,7 +1126,8 @@ define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v4i32_7:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpslld $3, %xmm0, %xmm1
+; X64-AVX512DQ-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <4 x i32> %a0, <i32 7, i32 7, i32 7, i32 7>
   ret <4 x i32> %1
@@ -1227,7 +1236,8 @@ define <4 x i32> @mul_v4i32_neg63(<4 x i32> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v4i32_neg63:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpslld $6, %xmm0, %xmm1
+; X64-AVX512DQ-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <4 x i32> %a0, <i32 -63, i32 -63, i32 -63, i32 -63>
   ret <4 x i32> %1
diff --git a/llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll b/llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll
index 364dc185d26c2..1930fb770cd4a 100644
--- a/llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll
+++ b/llvm/test/CodeGen/X86/vector-popcnt-128-ult-ugt.ll
@@ -5795,8 +5795,7 @@ define <4 x i32> @ugt_1_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_1_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_1_v4i32:
@@ -6041,8 +6040,7 @@ define <4 x i32> @ugt_2_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_2_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_2_v4i32:
@@ -6069,8 +6067,7 @@ define <4 x i32> @ugt_2_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 2, i32 2, i32 2, i32 2>
@@ -6415,8 +6412,7 @@ define <4 x i32> @ugt_3_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_3_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_3_v4i32:
@@ -6443,8 +6439,7 @@ define <4 x i32> @ugt_3_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 3, i32 3, i32 3, i32 3>
@@ -6789,8 +6784,7 @@ define <4 x i32> @ugt_4_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_4_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4,4,4,4]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_4_v4i32:
@@ -6817,8 +6811,7 @@ define <4 x i32> @ugt_4_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4,4,4,4]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 4, i32 4, i32 4, i32 4>
@@ -7163,8 +7156,7 @@ define <4 x i32> @ugt_5_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_5_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [5,5,5,5]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_5_v4i32:
@@ -7191,8 +7183,7 @@ define <4 x i32> @ugt_5_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [5,5,5,5]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
@@ -7537,8 +7528,7 @@ define <4 x i32> @ugt_6_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_6_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [6,6,6,6]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_6_v4i32:
@@ -7565,8 +7555,7 @@ define <4 x i32> @ugt_6_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [6,6,6,6]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 6, i32 6, i32 6, i32 6>
@@ -7911,8 +7900,7 @@ define <4 x i32> @ugt_7_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_7_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_7_v4i32:
@@ -7939,8 +7927,7 @@ define <4 x i32> @ugt_7_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 7, i32 7, i32 7, i32 7>
@@ -8285,8 +8272,7 @@ define <4 x i32> @ugt_8_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_8_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8,8,8,8]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_8_v4i32:
@@ -8313,8 +8299,7 @@ define <4 x i32> @ugt_8_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [8,8,8,8]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 8, i32 8, i32 8, i32 8>
@@ -8659,8 +8644,7 @@ define <4 x i32> @ugt_9_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_9_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [9,9,9,9]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_9_v4i32:
@@ -8687,8 +8671,7 @@ define <4 x i32> @ugt_9_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [9,9,9,9]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 9, i32 9, i32 9, i32 9>
@@ -9033,8 +9016,7 @@ define <4 x i32> @ugt_10_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_10_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [10,10,10,10]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_10_v4i32:
@@ -9061,8 +9043,7 @@ define <4 x i32> @ugt_10_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [10,10,10,10]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 10, i32 10, i32 10, i32 10>
@@ -9407,8 +9388,7 @@ define <4 x i32> @ugt_11_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_11_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [11,11,11,11]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_11_v4i32:
@@ -9435,8 +9415,7 @@ define <4 x i32> @ugt_11_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [11,11,11,11]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 11, i32 11, i32 11, i32 11>
@@ -9781,8 +9760,7 @@ define <4 x i32> @ugt_12_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_12_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [12,12,12,12]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_12_v4i32:
@@ -9809,8 +9787,7 @@ define <4 x i32> @ugt_12_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [12,12,12,12]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 12, i32 12, i32 12, i32 12>
@@ -10155,8 +10132,7 @@ define <4 x i32> @ugt_13_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_13_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [13,13,13,13]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_13_v4i32:
@@ -10183,8 +10159,7 @@ define <4 x i32> @ugt_13_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [13,13,13,13]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 13, i32 13, i32 13, i32 13>
@@ -10529,8 +10504,7 @@ define <4 x i32> @ugt_14_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_14_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [14,14,14,14]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_14_v4i32:
@@ -10557,8 +10531,7 @@ define <4 x i32> @ugt_14_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [14,14,14,14]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 14, i32 14, i32 14, i32 14>
@@ -10903,8 +10876,7 @@ define <4 x i32> @ugt_15_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_15_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_15_v4i32:
@@ -10931,8 +10903,7 @@ define <4 x i32> @ugt_15_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 15, i32 15, i32 15, i32 15>
@@ -11277,8 +11248,7 @@ define <4 x i32> @ugt_16_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_16_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_16_v4i32:
@@ -11305,8 +11275,7 @@ define <4 x i32> @ugt_16_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 16, i32 16, i32 16, i32 16>
@@ -11651,8 +11620,7 @@ define <4 x i32> @ugt_17_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_17_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [17,17,17,17]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_17_v4i32:
@@ -11679,8 +11647,7 @@ define <4 x i32> @ugt_17_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [17,17,17,17]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 17, i32 17, i32 17, i32 17>
@@ -12025,8 +11992,7 @@ define <4 x i32> @ugt_18_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_18_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [18,18,18,18]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_18_v4i32:
@@ -12053,8 +12019,7 @@ define <4 x i32> @ugt_18_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [18,18,18,18]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 18, i32 18, i32 18, i32 18>
@@ -12399,8 +12364,7 @@ define <4 x i32> @ugt_19_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_19_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [19,19,19,19]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_19_v4i32:
@@ -12427,8 +12391,7 @@ define <4 x i32> @ugt_19_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [19,19,19,19]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 19, i32 19, i32 19, i32 19>
@@ -12773,8 +12736,7 @@ define <4 x i32> @ugt_20_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_20_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_20_v4i32:
@@ -12801,8 +12763,7 @@ define <4 x i32> @ugt_20_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 20, i32 20, i32 20, i32 20>
@@ -13147,8 +13108,7 @@ define <4 x i32> @ugt_21_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_21_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [21,21,21,21]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_21_v4i32:
@@ -13175,8 +13135,7 @@ define <4 x i32> @ugt_21_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [21,21,21,21]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 21, i32 21, i32 21, i32 21>
@@ -13521,8 +13480,7 @@ define <4 x i32> @ugt_22_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_22_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [22,22,22,22]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_22_v4i32:
@@ -13549,8 +13507,7 @@ define <4 x i32> @ugt_22_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [22,22,22,22]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 22, i32 22, i32 22, i32 22>
@@ -13895,8 +13852,7 @@ define <4 x i32> @ugt_23_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_23_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [23,23,23,23]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_23_v4i32:
@@ -13923,8 +13879,7 @@ define <4 x i32> @ugt_23_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [23,23,23,23]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 23, i32 23, i32 23, i32 23>
@@ -14269,8 +14224,7 @@ define <4 x i32> @ugt_24_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_24_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [24,24,24,24]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_24_v4i32:
@@ -14297,8 +14251,7 @@ define <4 x i32> @ugt_24_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [24,24,24,24]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 24, i32 24, i32 24, i32 24>
@@ -14643,8 +14596,7 @@ define <4 x i32> @ugt_25_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_25_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [25,25,25,25]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_25_v4i32:
@@ -14671,8 +14623,7 @@ define <4 x i32> @ugt_25_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [25,25,25,25]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 25, i32 25, i32 25, i32 25>
@@ -15017,8 +14968,7 @@ define <4 x i32> @ugt_26_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_26_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [26,26,26,26]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_26_v4i32:
@@ -15045,8 +14995,7 @@ define <4 x i32> @ugt_26_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [26,26,26,26]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 26, i32 26, i32 26, i32 26>
@@ -15391,8 +15340,7 @@ define <4 x i32> @ugt_27_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_27_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [27,27,27,27]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_27_v4i32:
@@ -15419,8 +15367,7 @@ define <4 x i32> @ugt_27_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [27,27,27,27]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 27, i32 27, i32 27, i32 27>
@@ -15765,8 +15712,7 @@ define <4 x i32> @ugt_28_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_28_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [28,28,28,28]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_28_v4i32:
@@ -15793,8 +15739,7 @@ define <4 x i32> @ugt_28_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [28,28,28,28]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 28, i32 28, i32 28, i32 28>
@@ -16139,8 +16084,7 @@ define <4 x i32> @ugt_29_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_29_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [29,29,29,29]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_29_v4i32:
@@ -16167,8 +16111,7 @@ define <4 x i32> @ugt_29_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [29,29,29,29]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 29, i32 29, i32 29, i32 29>
@@ -16513,8 +16456,7 @@ define <4 x i32> @ugt_30_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_30_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [30,30,30,30]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_30_v4i32:
@@ -16541,8 +16483,7 @@ define <4 x i32> @ugt_30_v4i32(<4 x i32> %0) {
 ; BITALG-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; BITALG-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [30,30,30,30]
-; BITALG-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %0)
   %3 = icmp ugt <4 x i32> %2, <i32 30, i32 30, i32 30, i32 30>
@@ -16821,8 +16762,7 @@ define <2 x i64> @ugt_1_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_1_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_1_v2i64:
@@ -17078,8 +17018,7 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_2_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [2,2]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_2_v2i64:
@@ -17097,8 +17036,7 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [2,2]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 2, i64 2>
@@ -17395,8 +17333,7 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_3_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [3,3]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_3_v2i64:
@@ -17414,8 +17351,7 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [3,3]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 3, i64 3>
@@ -17712,8 +17648,7 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_4_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [4,4]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_4_v2i64:
@@ -17731,8 +17666,7 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [4,4]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 4, i64 4>
@@ -18029,8 +17963,7 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_5_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [5,5]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_5_v2i64:
@@ -18048,8 +17981,7 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [5,5]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 5, i64 5>
@@ -18346,8 +18278,7 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_6_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [6,6]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_6_v2i64:
@@ -18365,8 +18296,7 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [6,6]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 6, i64 6>
@@ -18663,8 +18593,7 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_7_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [7,7]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_7_v2i64:
@@ -18682,8 +18611,7 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [7,7]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 7, i64 7>
@@ -18980,8 +18908,7 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_8_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [8,8]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_8_v2i64:
@@ -18999,8 +18926,7 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [8,8]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 8, i64 8>
@@ -19297,8 +19223,7 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_9_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [9,9]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_9_v2i64:
@@ -19316,8 +19241,7 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [9,9]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 9, i64 9>
@@ -19614,8 +19538,7 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_10_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [10,10]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_10_v2i64:
@@ -19633,8 +19556,7 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [10,10]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 10, i64 10>
@@ -19931,8 +19853,7 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_11_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [11,11]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_11_v2i64:
@@ -19950,8 +19871,7 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [11,11]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 11, i64 11>
@@ -20248,8 +20168,7 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_12_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [12,12]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_12_v2i64:
@@ -20267,8 +20186,7 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [12,12]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 12, i64 12>
@@ -20565,8 +20483,7 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_13_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [13,13]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_13_v2i64:
@@ -20584,8 +20501,7 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [13,13]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 13, i64 13>
@@ -20882,8 +20798,7 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_14_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [14,14]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_14_v2i64:
@@ -20901,8 +20816,7 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [14,14]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 14, i64 14>
@@ -21199,8 +21113,7 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_15_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [15,15]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_15_v2i64:
@@ -21218,8 +21131,7 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [15,15]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 15, i64 15>
@@ -21516,8 +21428,7 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_16_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [16,16]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_16_v2i64:
@@ -21535,8 +21446,7 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [16,16]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 16, i64 16>
@@ -21833,8 +21743,7 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_17_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [17,17]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_17_v2i64:
@@ -21852,8 +21761,7 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [17,17]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 17, i64 17>
@@ -22150,8 +22058,7 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_18_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [18,18]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_18_v2i64:
@@ -22169,8 +22076,7 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [18,18]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 18, i64 18>
@@ -22467,8 +22373,7 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_19_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [19,19]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_19_v2i64:
@@ -22486,8 +22391,7 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [19,19]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 19, i64 19>
@@ -22784,8 +22688,7 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_20_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [20,20]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_20_v2i64:
@@ -22803,8 +22706,7 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [20,20]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 20, i64 20>
@@ -23101,8 +23003,7 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_21_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [21,21]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_21_v2i64:
@@ -23120,8 +23021,7 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [21,21]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 21, i64 21>
@@ -23418,8 +23318,7 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_22_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [22,22]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_22_v2i64:
@@ -23437,8 +23336,7 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [22,22]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 22, i64 22>
@@ -23735,8 +23633,7 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_23_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [23,23]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_23_v2i64:
@@ -23754,8 +23651,7 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [23,23]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 23, i64 23>
@@ -24052,8 +23948,7 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_24_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [24,24]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_24_v2i64:
@@ -24071,8 +23966,7 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [24,24]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 24, i64 24>
@@ -24369,8 +24263,7 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_25_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [25,25]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_25_v2i64:
@@ -24388,8 +24281,7 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [25,25]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 25, i64 25>
@@ -24686,8 +24578,7 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_26_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [26,26]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_26_v2i64:
@@ -24705,8 +24596,7 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [26,26]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 26, i64 26>
@@ -25003,8 +24893,7 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_27_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [27,27]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_27_v2i64:
@@ -25022,8 +24911,7 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [27,27]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 27, i64 27>
@@ -25320,8 +25208,7 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_28_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [28,28]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_28_v2i64:
@@ -25339,8 +25226,7 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [28,28]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 28, i64 28>
@@ -25637,8 +25523,7 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_29_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [29,29]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_29_v2i64:
@@ -25656,8 +25541,7 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [29,29]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 29, i64 29>
@@ -25954,8 +25838,7 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_30_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [30,30]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_30_v2i64:
@@ -25973,8 +25856,7 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [30,30]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 30, i64 30>
@@ -26271,8 +26153,7 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_31_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [31,31]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_31_v2i64:
@@ -26290,8 +26171,7 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [31,31]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 31, i64 31>
@@ -26588,8 +26468,7 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_32_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [32,32]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_32_v2i64:
@@ -26607,8 +26486,7 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [32,32]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 32, i64 32>
@@ -26905,8 +26783,7 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_33_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [33,33]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_33_v2i64:
@@ -26924,8 +26801,7 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [33,33]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 33, i64 33>
@@ -27222,8 +27098,7 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_34_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [34,34]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_34_v2i64:
@@ -27241,8 +27116,7 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [34,34]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 34, i64 34>
@@ -27539,8 +27413,7 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_35_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [35,35]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_35_v2i64:
@@ -27558,8 +27431,7 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [35,35]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 35, i64 35>
@@ -27856,8 +27728,7 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_36_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [36,36]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_36_v2i64:
@@ -27875,8 +27746,7 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [36,36]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 36, i64 36>
@@ -28173,8 +28043,7 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_37_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [37,37]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_37_v2i64:
@@ -28192,8 +28061,7 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [37,37]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 37, i64 37>
@@ -28490,8 +28358,7 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_38_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [38,38]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_38_v2i64:
@@ -28509,8 +28376,7 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [38,38]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 38, i64 38>
@@ -28807,8 +28673,7 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_39_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [39,39]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_39_v2i64:
@@ -28826,8 +28691,7 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [39,39]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 39, i64 39>
@@ -29124,8 +28988,7 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_40_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [40,40]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_40_v2i64:
@@ -29143,8 +29006,7 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [40,40]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 40, i64 40>
@@ -29441,8 +29303,7 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_41_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [41,41]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_41_v2i64:
@@ -29460,8 +29321,7 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [41,41]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 41, i64 41>
@@ -29758,8 +29618,7 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_42_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [42,42]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_42_v2i64:
@@ -29777,8 +29636,7 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [42,42]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 42, i64 42>
@@ -30075,8 +29933,7 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_43_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [43,43]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_43_v2i64:
@@ -30094,8 +29951,7 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [43,43]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 43, i64 43>
@@ -30392,8 +30248,7 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_44_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [44,44]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_44_v2i64:
@@ -30411,8 +30266,7 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [44,44]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 44, i64 44>
@@ -30709,8 +30563,7 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_45_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [45,45]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_45_v2i64:
@@ -30728,8 +30581,7 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [45,45]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 45, i64 45>
@@ -31026,8 +30878,7 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_46_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [46,46]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_46_v2i64:
@@ -31045,8 +30896,7 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [46,46]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 46, i64 46>
@@ -31343,8 +31193,7 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_47_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [47,47]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_47_v2i64:
@@ -31362,8 +31211,7 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [47,47]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 47, i64 47>
@@ -31660,8 +31508,7 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_48_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [48,48]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_48_v2i64:
@@ -31679,8 +31526,7 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [48,48]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 48, i64 48>
@@ -31977,8 +31823,7 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_49_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [49,49]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_49_v2i64:
@@ -31996,8 +31841,7 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [49,49]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 49, i64 49>
@@ -32294,8 +32138,7 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_50_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [50,50]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_50_v2i64:
@@ -32313,8 +32156,7 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [50,50]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 50, i64 50>
@@ -32611,8 +32453,7 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_51_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [51,51]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_51_v2i64:
@@ -32630,8 +32471,7 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [51,51]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 51, i64 51>
@@ -32928,8 +32768,7 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_52_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [52,52]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_52_v2i64:
@@ -32947,8 +32786,7 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [52,52]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 52, i64 52>
@@ -33245,8 +33083,7 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_53_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [53,53]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_53_v2i64:
@@ -33264,8 +33101,7 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [53,53]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 53, i64 53>
@@ -33562,8 +33398,7 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_54_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [54,54]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_54_v2i64:
@@ -33581,8 +33416,7 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [54,54]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 54, i64 54>
@@ -33879,8 +33713,7 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_55_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [55,55]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_55_v2i64:
@@ -33898,8 +33731,7 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [55,55]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 55, i64 55>
@@ -34196,8 +34028,7 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_56_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [56,56]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_56_v2i64:
@@ -34215,8 +34046,7 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [56,56]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 56, i64 56>
@@ -34513,8 +34343,7 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_57_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [57,57]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_57_v2i64:
@@ -34532,8 +34361,7 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [57,57]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 57, i64 57>
@@ -34830,8 +34658,7 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_58_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [58,58]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_58_v2i64:
@@ -34849,8 +34676,7 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [58,58]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 58, i64 58>
@@ -35147,8 +34973,7 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_59_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [59,59]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_59_v2i64:
@@ -35166,8 +34991,7 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [59,59]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 59, i64 59>
@@ -35464,8 +35288,7 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_60_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [60,60]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_60_v2i64:
@@ -35483,8 +35306,7 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [60,60]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 60, i64 60>
@@ -35781,8 +35603,7 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_61_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [61,61]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_61_v2i64:
@@ -35800,8 +35621,7 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [61,61]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 61, i64 61>
@@ -36098,8 +35918,7 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_62_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [62,62]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_62_v2i64:
@@ -36117,8 +35936,7 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %xmm0, %xmm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [62,62]
-; BITALG-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; BITALG-NEXT:    retq
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %0)
   %3 = icmp ugt <2 x i64> %2, <i64 62, i64 62>
diff --git a/llvm/test/CodeGen/X86/vector-popcnt-128.ll b/llvm/test/CodeGen/X86/vector-popcnt-128.ll
index 58cacfb0485ec..f23f28cdebb41 100644
--- a/llvm/test/CodeGen/X86/vector-popcnt-128.ll
+++ b/llvm/test/CodeGen/X86/vector-popcnt-128.ll
@@ -764,8 +764,7 @@ define <2 x i64> @eq_1_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: eq_1_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: eq_1_v2i64:
@@ -845,8 +844,7 @@ define <2 x i64> @ne_1_v2i64(<2 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ne_1_v2i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
@@ -943,8 +941,7 @@ define <4 x i32> @eq_1_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: eq_1_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: eq_1_v4i32:
@@ -1048,8 +1045,7 @@ define <4 x i32> @ne_1_v4i32(<4 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ne_1_v4i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %xmm0, %xmm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-popcnt-256-ult-ugt.ll b/llvm/test/CodeGen/X86/vector-popcnt-256-ult-ugt.ll
index c1a248fadd9c7..1afdfa7d1a9ad 100644
--- a/llvm/test/CodeGen/X86/vector-popcnt-256-ult-ugt.ll
+++ b/llvm/test/CodeGen/X86/vector-popcnt-256-ult-ugt.ll
@@ -3337,8 +3337,7 @@ define <8 x i32> @ugt_1_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_1_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_1_v8i32:
@@ -3496,8 +3495,7 @@ define <8 x i32> @ugt_2_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_2_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_2_v8i32:
@@ -3523,8 +3521,7 @@ define <8 x i32> @ugt_2_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -3702,8 +3699,7 @@ define <8 x i32> @ugt_3_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_3_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [3,3,3,3,3,3,3,3]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_3_v8i32:
@@ -3729,8 +3725,7 @@ define <8 x i32> @ugt_3_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [3,3,3,3,3,3,3,3]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -3908,8 +3903,7 @@ define <8 x i32> @ugt_4_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_4_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_4_v8i32:
@@ -3935,8 +3929,7 @@ define <8 x i32> @ugt_4_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
@@ -4114,8 +4107,7 @@ define <8 x i32> @ugt_5_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_5_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [5,5,5,5,5,5,5,5]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_5_v8i32:
@@ -4141,8 +4133,7 @@ define <8 x i32> @ugt_5_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [5,5,5,5,5,5,5,5]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -4320,8 +4311,7 @@ define <8 x i32> @ugt_6_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_6_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [6,6,6,6,6,6,6,6]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_6_v8i32:
@@ -4347,8 +4337,7 @@ define <8 x i32> @ugt_6_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [6,6,6,6,6,6,6,6]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
@@ -4526,8 +4515,7 @@ define <8 x i32> @ugt_7_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_7_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [7,7,7,7,7,7,7,7]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_7_v8i32:
@@ -4553,8 +4541,7 @@ define <8 x i32> @ugt_7_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [7,7,7,7,7,7,7,7]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
@@ -4732,8 +4719,7 @@ define <8 x i32> @ugt_8_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_8_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [8,8,8,8,8,8,8,8]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_8_v8i32:
@@ -4759,8 +4745,7 @@ define <8 x i32> @ugt_8_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [8,8,8,8,8,8,8,8]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
@@ -4938,8 +4923,7 @@ define <8 x i32> @ugt_9_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_9_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [9,9,9,9,9,9,9,9]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_9_v8i32:
@@ -4965,8 +4949,7 @@ define <8 x i32> @ugt_9_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [9,9,9,9,9,9,9,9]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9>
@@ -5144,8 +5127,7 @@ define <8 x i32> @ugt_10_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_10_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [10,10,10,10,10,10,10,10]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_10_v8i32:
@@ -5171,8 +5153,7 @@ define <8 x i32> @ugt_10_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [10,10,10,10,10,10,10,10]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
@@ -5350,8 +5331,7 @@ define <8 x i32> @ugt_11_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_11_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [11,11,11,11,11,11,11,11]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_11_v8i32:
@@ -5377,8 +5357,7 @@ define <8 x i32> @ugt_11_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [11,11,11,11,11,11,11,11]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 11, i32 11, i32 11, i32 11, i32 11, i32 11, i32 11, i32 11>
@@ -5556,8 +5535,7 @@ define <8 x i32> @ugt_12_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_12_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [12,12,12,12,12,12,12,12]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_12_v8i32:
@@ -5583,8 +5561,7 @@ define <8 x i32> @ugt_12_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [12,12,12,12,12,12,12,12]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12>
@@ -5762,8 +5739,7 @@ define <8 x i32> @ugt_13_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_13_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [13,13,13,13,13,13,13,13]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_13_v8i32:
@@ -5789,8 +5765,7 @@ define <8 x i32> @ugt_13_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [13,13,13,13,13,13,13,13]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13>
@@ -5968,8 +5943,7 @@ define <8 x i32> @ugt_14_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_14_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [14,14,14,14,14,14,14,14]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_14_v8i32:
@@ -5995,8 +5969,7 @@ define <8 x i32> @ugt_14_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [14,14,14,14,14,14,14,14]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 14, i32 14, i32 14, i32 14, i32 14, i32 14, i32 14, i32 14>
@@ -6174,8 +6147,7 @@ define <8 x i32> @ugt_15_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_15_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_15_v8i32:
@@ -6201,8 +6173,7 @@ define <8 x i32> @ugt_15_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
@@ -6380,8 +6351,7 @@ define <8 x i32> @ugt_16_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_16_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_16_v8i32:
@@ -6407,8 +6377,7 @@ define <8 x i32> @ugt_16_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
@@ -6586,8 +6555,7 @@ define <8 x i32> @ugt_17_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_17_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [17,17,17,17,17,17,17,17]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_17_v8i32:
@@ -6613,8 +6581,7 @@ define <8 x i32> @ugt_17_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [17,17,17,17,17,17,17,17]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
@@ -6792,8 +6759,7 @@ define <8 x i32> @ugt_18_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_18_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [18,18,18,18,18,18,18,18]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_18_v8i32:
@@ -6819,8 +6785,7 @@ define <8 x i32> @ugt_18_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [18,18,18,18,18,18,18,18]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 18, i32 18, i32 18, i32 18, i32 18, i32 18, i32 18, i32 18>
@@ -6998,8 +6963,7 @@ define <8 x i32> @ugt_19_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_19_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [19,19,19,19,19,19,19,19]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_19_v8i32:
@@ -7025,8 +6989,7 @@ define <8 x i32> @ugt_19_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [19,19,19,19,19,19,19,19]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 19, i32 19, i32 19, i32 19, i32 19, i32 19, i32 19, i32 19>
@@ -7204,8 +7167,7 @@ define <8 x i32> @ugt_20_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_20_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [20,20,20,20,20,20,20,20]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_20_v8i32:
@@ -7231,8 +7193,7 @@ define <8 x i32> @ugt_20_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [20,20,20,20,20,20,20,20]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 20, i32 20, i32 20, i32 20, i32 20, i32 20, i32 20, i32 20>
@@ -7410,8 +7371,7 @@ define <8 x i32> @ugt_21_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_21_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [21,21,21,21,21,21,21,21]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_21_v8i32:
@@ -7437,8 +7397,7 @@ define <8 x i32> @ugt_21_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [21,21,21,21,21,21,21,21]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 21, i32 21, i32 21, i32 21, i32 21, i32 21, i32 21, i32 21>
@@ -7616,8 +7575,7 @@ define <8 x i32> @ugt_22_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_22_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [22,22,22,22,22,22,22,22]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_22_v8i32:
@@ -7643,8 +7601,7 @@ define <8 x i32> @ugt_22_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [22,22,22,22,22,22,22,22]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22>
@@ -7822,8 +7779,7 @@ define <8 x i32> @ugt_23_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_23_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [23,23,23,23,23,23,23,23]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_23_v8i32:
@@ -7849,8 +7805,7 @@ define <8 x i32> @ugt_23_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [23,23,23,23,23,23,23,23]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23>
@@ -8028,8 +7983,7 @@ define <8 x i32> @ugt_24_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_24_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [24,24,24,24,24,24,24,24]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_24_v8i32:
@@ -8055,8 +8009,7 @@ define <8 x i32> @ugt_24_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [24,24,24,24,24,24,24,24]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
@@ -8234,8 +8187,7 @@ define <8 x i32> @ugt_25_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_25_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [25,25,25,25,25,25,25,25]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_25_v8i32:
@@ -8261,8 +8213,7 @@ define <8 x i32> @ugt_25_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [25,25,25,25,25,25,25,25]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25>
@@ -8440,8 +8391,7 @@ define <8 x i32> @ugt_26_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_26_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [26,26,26,26,26,26,26,26]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_26_v8i32:
@@ -8467,8 +8417,7 @@ define <8 x i32> @ugt_26_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [26,26,26,26,26,26,26,26]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 26, i32 26, i32 26, i32 26, i32 26, i32 26, i32 26, i32 26>
@@ -8646,8 +8595,7 @@ define <8 x i32> @ugt_27_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_27_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [27,27,27,27,27,27,27,27]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_27_v8i32:
@@ -8673,8 +8621,7 @@ define <8 x i32> @ugt_27_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [27,27,27,27,27,27,27,27]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 27, i32 27, i32 27, i32 27, i32 27, i32 27, i32 27, i32 27>
@@ -8852,8 +8799,7 @@ define <8 x i32> @ugt_28_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_28_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [28,28,28,28,28,28,28,28]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_28_v8i32:
@@ -8879,8 +8825,7 @@ define <8 x i32> @ugt_28_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [28,28,28,28,28,28,28,28]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28>
@@ -9058,8 +9003,7 @@ define <8 x i32> @ugt_29_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_29_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [29,29,29,29,29,29,29,29]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_29_v8i32:
@@ -9085,8 +9029,7 @@ define <8 x i32> @ugt_29_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [29,29,29,29,29,29,29,29]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 29, i32 29, i32 29, i32 29, i32 29, i32 29, i32 29, i32 29>
@@ -9264,8 +9207,7 @@ define <8 x i32> @ugt_30_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_30_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [30,30,30,30,30,30,30,30]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_30_v8i32:
@@ -9291,8 +9233,7 @@ define <8 x i32> @ugt_30_v8i32(<8 x i32> %0) {
 ; BITALG-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
 ; BITALG-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [30,30,30,30,30,30,30,30]
-; BITALG-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %0)
   %3 = icmp ugt <8 x i32> %2, <i32 30, i32 30, i32 30, i32 30, i32 30, i32 30, i32 30, i32 30>
@@ -9441,8 +9382,7 @@ define <4 x i64> @ugt_1_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_1_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_1_v4i64:
@@ -9589,8 +9529,7 @@ define <4 x i64> @ugt_2_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_2_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [2,2,2,2]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_2_v4i64:
@@ -9608,8 +9547,7 @@ define <4 x i64> @ugt_2_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [2,2,2,2]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 2, i64 2, i64 2, i64 2>
@@ -9757,8 +9695,7 @@ define <4 x i64> @ugt_3_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_3_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [3,3,3,3]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_3_v4i64:
@@ -9776,8 +9713,7 @@ define <4 x i64> @ugt_3_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [3,3,3,3]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 3, i64 3, i64 3, i64 3>
@@ -9925,8 +9861,7 @@ define <4 x i64> @ugt_4_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_4_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4,4,4,4]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_4_v4i64:
@@ -9944,8 +9879,7 @@ define <4 x i64> @ugt_4_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4,4,4,4]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 4, i64 4, i64 4, i64 4>
@@ -10093,8 +10027,7 @@ define <4 x i64> @ugt_5_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_5_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [5,5,5,5]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_5_v4i64:
@@ -10112,8 +10045,7 @@ define <4 x i64> @ugt_5_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [5,5,5,5]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 5, i64 5, i64 5, i64 5>
@@ -10261,8 +10193,7 @@ define <4 x i64> @ugt_6_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_6_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [6,6,6,6]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_6_v4i64:
@@ -10280,8 +10211,7 @@ define <4 x i64> @ugt_6_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [6,6,6,6]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 6, i64 6, i64 6, i64 6>
@@ -10429,8 +10359,7 @@ define <4 x i64> @ugt_7_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_7_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [7,7,7,7]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_7_v4i64:
@@ -10448,8 +10377,7 @@ define <4 x i64> @ugt_7_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [7,7,7,7]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 7, i64 7, i64 7, i64 7>
@@ -10597,8 +10525,7 @@ define <4 x i64> @ugt_8_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_8_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [8,8,8,8]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_8_v4i64:
@@ -10616,8 +10543,7 @@ define <4 x i64> @ugt_8_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [8,8,8,8]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 8, i64 8, i64 8, i64 8>
@@ -10765,8 +10691,7 @@ define <4 x i64> @ugt_9_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_9_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9,9,9,9]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_9_v4i64:
@@ -10784,8 +10709,7 @@ define <4 x i64> @ugt_9_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9,9,9,9]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 9, i64 9, i64 9, i64 9>
@@ -10933,8 +10857,7 @@ define <4 x i64> @ugt_10_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_10_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [10,10,10,10]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_10_v4i64:
@@ -10952,8 +10875,7 @@ define <4 x i64> @ugt_10_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [10,10,10,10]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 10, i64 10, i64 10, i64 10>
@@ -11101,8 +11023,7 @@ define <4 x i64> @ugt_11_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_11_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,11,11,11]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_11_v4i64:
@@ -11120,8 +11041,7 @@ define <4 x i64> @ugt_11_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,11,11,11]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 11, i64 11, i64 11, i64 11>
@@ -11269,8 +11189,7 @@ define <4 x i64> @ugt_12_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_12_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [12,12,12,12]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_12_v4i64:
@@ -11288,8 +11207,7 @@ define <4 x i64> @ugt_12_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [12,12,12,12]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 12, i64 12, i64 12, i64 12>
@@ -11437,8 +11355,7 @@ define <4 x i64> @ugt_13_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_13_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [13,13,13,13]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_13_v4i64:
@@ -11456,8 +11373,7 @@ define <4 x i64> @ugt_13_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [13,13,13,13]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 13, i64 13, i64 13, i64 13>
@@ -11605,8 +11521,7 @@ define <4 x i64> @ugt_14_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_14_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [14,14,14,14]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_14_v4i64:
@@ -11624,8 +11539,7 @@ define <4 x i64> @ugt_14_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [14,14,14,14]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 14, i64 14, i64 14, i64 14>
@@ -11773,8 +11687,7 @@ define <4 x i64> @ugt_15_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_15_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [15,15,15,15]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_15_v4i64:
@@ -11792,8 +11705,7 @@ define <4 x i64> @ugt_15_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [15,15,15,15]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 15, i64 15, i64 15, i64 15>
@@ -11941,8 +11853,7 @@ define <4 x i64> @ugt_16_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_16_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [16,16,16,16]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_16_v4i64:
@@ -11960,8 +11871,7 @@ define <4 x i64> @ugt_16_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [16,16,16,16]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 16, i64 16, i64 16, i64 16>
@@ -12109,8 +12019,7 @@ define <4 x i64> @ugt_17_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_17_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [17,17,17,17]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_17_v4i64:
@@ -12128,8 +12037,7 @@ define <4 x i64> @ugt_17_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [17,17,17,17]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 17, i64 17, i64 17, i64 17>
@@ -12277,8 +12185,7 @@ define <4 x i64> @ugt_18_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_18_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [18,18,18,18]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_18_v4i64:
@@ -12296,8 +12203,7 @@ define <4 x i64> @ugt_18_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [18,18,18,18]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 18, i64 18, i64 18, i64 18>
@@ -12445,8 +12351,7 @@ define <4 x i64> @ugt_19_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_19_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [19,19,19,19]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_19_v4i64:
@@ -12464,8 +12369,7 @@ define <4 x i64> @ugt_19_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [19,19,19,19]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 19, i64 19, i64 19, i64 19>
@@ -12613,8 +12517,7 @@ define <4 x i64> @ugt_20_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_20_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [20,20,20,20]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_20_v4i64:
@@ -12632,8 +12535,7 @@ define <4 x i64> @ugt_20_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [20,20,20,20]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 20, i64 20, i64 20, i64 20>
@@ -12781,8 +12683,7 @@ define <4 x i64> @ugt_21_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_21_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [21,21,21,21]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_21_v4i64:
@@ -12800,8 +12701,7 @@ define <4 x i64> @ugt_21_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [21,21,21,21]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 21, i64 21, i64 21, i64 21>
@@ -12949,8 +12849,7 @@ define <4 x i64> @ugt_22_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_22_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [22,22,22,22]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_22_v4i64:
@@ -12968,8 +12867,7 @@ define <4 x i64> @ugt_22_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [22,22,22,22]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 22, i64 22, i64 22, i64 22>
@@ -13117,8 +13015,7 @@ define <4 x i64> @ugt_23_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_23_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [23,23,23,23]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_23_v4i64:
@@ -13136,8 +13033,7 @@ define <4 x i64> @ugt_23_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [23,23,23,23]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 23, i64 23, i64 23, i64 23>
@@ -13285,8 +13181,7 @@ define <4 x i64> @ugt_24_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_24_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [24,24,24,24]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_24_v4i64:
@@ -13304,8 +13199,7 @@ define <4 x i64> @ugt_24_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [24,24,24,24]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 24, i64 24, i64 24, i64 24>
@@ -13453,8 +13347,7 @@ define <4 x i64> @ugt_25_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_25_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [25,25,25,25]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_25_v4i64:
@@ -13472,8 +13365,7 @@ define <4 x i64> @ugt_25_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [25,25,25,25]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 25, i64 25, i64 25, i64 25>
@@ -13621,8 +13513,7 @@ define <4 x i64> @ugt_26_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_26_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [26,26,26,26]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_26_v4i64:
@@ -13640,8 +13531,7 @@ define <4 x i64> @ugt_26_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [26,26,26,26]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 26, i64 26, i64 26, i64 26>
@@ -13789,8 +13679,7 @@ define <4 x i64> @ugt_27_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_27_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [27,27,27,27]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_27_v4i64:
@@ -13808,8 +13697,7 @@ define <4 x i64> @ugt_27_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [27,27,27,27]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 27, i64 27, i64 27, i64 27>
@@ -13957,8 +13845,7 @@ define <4 x i64> @ugt_28_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_28_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [28,28,28,28]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_28_v4i64:
@@ -13976,8 +13863,7 @@ define <4 x i64> @ugt_28_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [28,28,28,28]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 28, i64 28, i64 28, i64 28>
@@ -14125,8 +14011,7 @@ define <4 x i64> @ugt_29_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_29_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [29,29,29,29]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_29_v4i64:
@@ -14144,8 +14029,7 @@ define <4 x i64> @ugt_29_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [29,29,29,29]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 29, i64 29, i64 29, i64 29>
@@ -14293,8 +14177,7 @@ define <4 x i64> @ugt_30_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_30_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [30,30,30,30]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_30_v4i64:
@@ -14312,8 +14195,7 @@ define <4 x i64> @ugt_30_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [30,30,30,30]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 30, i64 30, i64 30, i64 30>
@@ -14461,8 +14343,7 @@ define <4 x i64> @ugt_31_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_31_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [31,31,31,31]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_31_v4i64:
@@ -14480,8 +14361,7 @@ define <4 x i64> @ugt_31_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [31,31,31,31]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 31, i64 31, i64 31, i64 31>
@@ -14629,8 +14509,7 @@ define <4 x i64> @ugt_32_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_32_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [32,32,32,32]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_32_v4i64:
@@ -14648,8 +14527,7 @@ define <4 x i64> @ugt_32_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [32,32,32,32]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 32, i64 32, i64 32, i64 32>
@@ -14797,8 +14675,7 @@ define <4 x i64> @ugt_33_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_33_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [33,33,33,33]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_33_v4i64:
@@ -14816,8 +14693,7 @@ define <4 x i64> @ugt_33_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [33,33,33,33]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 33, i64 33, i64 33, i64 33>
@@ -14965,8 +14841,7 @@ define <4 x i64> @ugt_34_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_34_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [34,34,34,34]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_34_v4i64:
@@ -14984,8 +14859,7 @@ define <4 x i64> @ugt_34_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [34,34,34,34]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 34, i64 34, i64 34, i64 34>
@@ -15133,8 +15007,7 @@ define <4 x i64> @ugt_35_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_35_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [35,35,35,35]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_35_v4i64:
@@ -15152,8 +15025,7 @@ define <4 x i64> @ugt_35_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [35,35,35,35]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 35, i64 35, i64 35, i64 35>
@@ -15301,8 +15173,7 @@ define <4 x i64> @ugt_36_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_36_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [36,36,36,36]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_36_v4i64:
@@ -15320,8 +15191,7 @@ define <4 x i64> @ugt_36_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [36,36,36,36]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 36, i64 36, i64 36, i64 36>
@@ -15469,8 +15339,7 @@ define <4 x i64> @ugt_37_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_37_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [37,37,37,37]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_37_v4i64:
@@ -15488,8 +15357,7 @@ define <4 x i64> @ugt_37_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [37,37,37,37]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 37, i64 37, i64 37, i64 37>
@@ -15637,8 +15505,7 @@ define <4 x i64> @ugt_38_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_38_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [38,38,38,38]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_38_v4i64:
@@ -15656,8 +15523,7 @@ define <4 x i64> @ugt_38_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [38,38,38,38]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 38, i64 38, i64 38, i64 38>
@@ -15805,8 +15671,7 @@ define <4 x i64> @ugt_39_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_39_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [39,39,39,39]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_39_v4i64:
@@ -15824,8 +15689,7 @@ define <4 x i64> @ugt_39_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [39,39,39,39]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 39, i64 39, i64 39, i64 39>
@@ -15973,8 +15837,7 @@ define <4 x i64> @ugt_40_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_40_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [40,40,40,40]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_40_v4i64:
@@ -15992,8 +15855,7 @@ define <4 x i64> @ugt_40_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [40,40,40,40]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 40, i64 40, i64 40, i64 40>
@@ -16141,8 +16003,7 @@ define <4 x i64> @ugt_41_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_41_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [41,41,41,41]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_41_v4i64:
@@ -16160,8 +16021,7 @@ define <4 x i64> @ugt_41_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [41,41,41,41]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 41, i64 41, i64 41, i64 41>
@@ -16309,8 +16169,7 @@ define <4 x i64> @ugt_42_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_42_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [42,42,42,42]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_42_v4i64:
@@ -16328,8 +16187,7 @@ define <4 x i64> @ugt_42_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [42,42,42,42]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 42, i64 42, i64 42, i64 42>
@@ -16477,8 +16335,7 @@ define <4 x i64> @ugt_43_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_43_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [43,43,43,43]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_43_v4i64:
@@ -16496,8 +16353,7 @@ define <4 x i64> @ugt_43_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [43,43,43,43]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 43, i64 43, i64 43, i64 43>
@@ -16645,8 +16501,7 @@ define <4 x i64> @ugt_44_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_44_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [44,44,44,44]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_44_v4i64:
@@ -16664,8 +16519,7 @@ define <4 x i64> @ugt_44_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [44,44,44,44]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 44, i64 44, i64 44, i64 44>
@@ -16813,8 +16667,7 @@ define <4 x i64> @ugt_45_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_45_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [45,45,45,45]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_45_v4i64:
@@ -16832,8 +16685,7 @@ define <4 x i64> @ugt_45_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [45,45,45,45]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 45, i64 45, i64 45, i64 45>
@@ -16981,8 +16833,7 @@ define <4 x i64> @ugt_46_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_46_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [46,46,46,46]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_46_v4i64:
@@ -17000,8 +16851,7 @@ define <4 x i64> @ugt_46_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [46,46,46,46]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 46, i64 46, i64 46, i64 46>
@@ -17149,8 +16999,7 @@ define <4 x i64> @ugt_47_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_47_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [47,47,47,47]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_47_v4i64:
@@ -17168,8 +17017,7 @@ define <4 x i64> @ugt_47_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [47,47,47,47]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 47, i64 47, i64 47, i64 47>
@@ -17317,8 +17165,7 @@ define <4 x i64> @ugt_48_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_48_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [48,48,48,48]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_48_v4i64:
@@ -17336,8 +17183,7 @@ define <4 x i64> @ugt_48_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [48,48,48,48]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 48, i64 48, i64 48, i64 48>
@@ -17485,8 +17331,7 @@ define <4 x i64> @ugt_49_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_49_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [49,49,49,49]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_49_v4i64:
@@ -17504,8 +17349,7 @@ define <4 x i64> @ugt_49_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [49,49,49,49]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 49, i64 49, i64 49, i64 49>
@@ -17653,8 +17497,7 @@ define <4 x i64> @ugt_50_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_50_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [50,50,50,50]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_50_v4i64:
@@ -17672,8 +17515,7 @@ define <4 x i64> @ugt_50_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [50,50,50,50]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 50, i64 50, i64 50, i64 50>
@@ -17821,8 +17663,7 @@ define <4 x i64> @ugt_51_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_51_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [51,51,51,51]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_51_v4i64:
@@ -17840,8 +17681,7 @@ define <4 x i64> @ugt_51_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [51,51,51,51]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 51, i64 51, i64 51, i64 51>
@@ -17989,8 +17829,7 @@ define <4 x i64> @ugt_52_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_52_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [52,52,52,52]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_52_v4i64:
@@ -18008,8 +17847,7 @@ define <4 x i64> @ugt_52_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [52,52,52,52]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 52, i64 52, i64 52, i64 52>
@@ -18157,8 +17995,7 @@ define <4 x i64> @ugt_53_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_53_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [53,53,53,53]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_53_v4i64:
@@ -18176,8 +18013,7 @@ define <4 x i64> @ugt_53_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [53,53,53,53]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 53, i64 53, i64 53, i64 53>
@@ -18325,8 +18161,7 @@ define <4 x i64> @ugt_54_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_54_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [54,54,54,54]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_54_v4i64:
@@ -18344,8 +18179,7 @@ define <4 x i64> @ugt_54_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [54,54,54,54]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 54, i64 54, i64 54, i64 54>
@@ -18493,8 +18327,7 @@ define <4 x i64> @ugt_55_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_55_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [55,55,55,55]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_55_v4i64:
@@ -18512,8 +18345,7 @@ define <4 x i64> @ugt_55_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [55,55,55,55]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 55, i64 55, i64 55, i64 55>
@@ -18661,8 +18493,7 @@ define <4 x i64> @ugt_56_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_56_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [56,56,56,56]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_56_v4i64:
@@ -18680,8 +18511,7 @@ define <4 x i64> @ugt_56_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [56,56,56,56]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 56, i64 56, i64 56, i64 56>
@@ -18829,8 +18659,7 @@ define <4 x i64> @ugt_57_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_57_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [57,57,57,57]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_57_v4i64:
@@ -18848,8 +18677,7 @@ define <4 x i64> @ugt_57_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [57,57,57,57]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 57, i64 57, i64 57, i64 57>
@@ -18997,8 +18825,7 @@ define <4 x i64> @ugt_58_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_58_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [58,58,58,58]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_58_v4i64:
@@ -19016,8 +18843,7 @@ define <4 x i64> @ugt_58_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [58,58,58,58]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 58, i64 58, i64 58, i64 58>
@@ -19165,8 +18991,7 @@ define <4 x i64> @ugt_59_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_59_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [59,59,59,59]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_59_v4i64:
@@ -19184,8 +19009,7 @@ define <4 x i64> @ugt_59_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [59,59,59,59]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 59, i64 59, i64 59, i64 59>
@@ -19333,8 +19157,7 @@ define <4 x i64> @ugt_60_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_60_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [60,60,60,60]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_60_v4i64:
@@ -19352,8 +19175,7 @@ define <4 x i64> @ugt_60_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [60,60,60,60]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 60, i64 60, i64 60, i64 60>
@@ -19501,8 +19323,7 @@ define <4 x i64> @ugt_61_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_61_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [61,61,61,61]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_61_v4i64:
@@ -19520,8 +19341,7 @@ define <4 x i64> @ugt_61_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [61,61,61,61]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 61, i64 61, i64 61, i64 61>
@@ -19669,8 +19489,7 @@ define <4 x i64> @ugt_62_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ugt_62_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [62,62,62,62]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: ugt_62_v4i64:
@@ -19688,8 +19507,7 @@ define <4 x i64> @ugt_62_v4i64(<4 x i64> %0) {
 ; BITALG-NEXT:    vpopcntb %ymm0, %ymm0
 ; BITALG-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; BITALG-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
-; BITALG-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [62,62,62,62]
-; BITALG-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; BITALG-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; BITALG-NEXT:    retq
   %2 = tail call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %0)
   %3 = icmp ugt <4 x i64> %2, <i64 62, i64 62, i64 62, i64 62>
diff --git a/llvm/test/CodeGen/X86/vector-popcnt-256.ll b/llvm/test/CodeGen/X86/vector-popcnt-256.ll
index a5ca4affdbc6b..a16287b0a6b3f 100644
--- a/llvm/test/CodeGen/X86/vector-popcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-popcnt-256.ll
@@ -496,8 +496,7 @@ define <4 x i64> @eq_1_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: eq_1_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: eq_1_v4i64:
@@ -583,8 +582,7 @@ define <4 x i64> @ne_1_v4i64(<4 x i64> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ne_1_v4i64:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntq %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    vpternlogq $15, %ymm0, %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
@@ -663,8 +661,7 @@ define <8 x i32> @eq_1_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: eq_1_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
 ; BITALG_NOVLX-LABEL: eq_1_v8i32:
@@ -744,8 +741,7 @@ define <8 x i32> @ne_1_v8i32(<8 x i32> %0) {
 ; AVX512VPOPCNTDQVL-LABEL: ne_1_v8i32:
 ; AVX512VPOPCNTDQVL:       # %bb.0:
 ; AVX512VPOPCNTDQVL-NEXT:    vpopcntd %ymm0, %ymm0
-; AVX512VPOPCNTDQVL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQVL-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    vpternlogq $15, %ymm0, %ymm0, %ymm0
 ; AVX512VPOPCNTDQVL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
index 49cd4d20d166a..104d9e3a3f07c 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
@@ -27,30 +27,11 @@ define i1 @trunc_v2i64_v2i1(<2 x i64>) nounwind {
 ; SSE41-NEXT:    setb %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v2i64_v2i1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    setb %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v2i64_v2i1:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    setb %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v2i64_v2i1:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    setb %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512VL-LABEL: trunc_v2i64_v2i1:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [1,1]
-; AVX512VL-NEXT:    vptest %xmm1, %xmm0
-; AVX512VL-NEXT:    setb %al
-; AVX512VL-NEXT:    retq
+; AVX-LABEL: trunc_v2i64_v2i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    retq
   %a = trunc <2 x i64> %0 to <2 x i1>
   %b = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> %a)
   ret i1 %b
@@ -71,30 +52,11 @@ define i1 @trunc_v4i32_v4i1(<4 x i32>) nounwind {
 ; SSE41-NEXT:    setb %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v4i32_v4i1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    setb %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v4i32_v4i1:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    setb %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v4i32_v4i1:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    setb %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512VL-LABEL: trunc_v4i32_v4i1:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [4294967297,4294967297]
-; AVX512VL-NEXT:    vptest %xmm1, %xmm0
-; AVX512VL-NEXT:    setb %al
-; AVX512VL-NEXT:    retq
+; AVX-LABEL: trunc_v4i32_v4i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    retq
   %a = trunc <4 x i32> %0 to <4 x i1>
   %b = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> %a)
   ret i1 %b
@@ -116,30 +78,11 @@ define i1 @trunc_v8i16_v8i1(<8 x i16>) nounwind {
 ; SSE41-NEXT:    setb %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v8i16_v8i1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    setb %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v8i16_v8i1:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    setb %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v8i16_v8i1:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    setb %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512VL-LABEL: trunc_v8i16_v8i1:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [281479271743489,281479271743489]
-; AVX512VL-NEXT:    vptest %xmm1, %xmm0
-; AVX512VL-NEXT:    setb %al
-; AVX512VL-NEXT:    retq
+; AVX-LABEL: trunc_v8i16_v8i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    retq
   %a = trunc <8 x i16> %0 to <8 x i1>
   %b = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> %a)
   ret i1 %b
@@ -160,30 +103,11 @@ define i1 @trunc_v16i8_v16i1(<16 x i8>) nounwind {
 ; SSE41-NEXT:    setb %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v16i8_v16i1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    setb %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v16i8_v16i1:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    setb %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v16i8_v16i1:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    setb %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512VL-LABEL: trunc_v16i8_v16i1:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [72340172838076673,72340172838076673]
-; AVX512VL-NEXT:    vptest %xmm1, %xmm0
-; AVX512VL-NEXT:    setb %al
-; AVX512VL-NEXT:    retq
+; AVX-LABEL: trunc_v16i8_v16i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setb %al
+; AVX-NEXT:    retq
   %a = trunc <16 x i8> %0 to <16 x i1>
   %b = call i1 @llvm.vector.reduce.and.v16i1(<16 x i1> %a)
   ret i1 %b
@@ -221,13 +145,28 @@ define i1 @trunc_v4i64_v4i1(<4 x i64>) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_v4i64_v4i1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    setb %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v4i64_v4i1:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    setb %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v4i64_v4i1:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    setb %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v4i64_v4i1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512VL-NEXT:    setb %al
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a = trunc <4 x i64> %0 to <4 x i1>
   %b = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> %a)
   ret i1 %b
@@ -265,13 +204,28 @@ define i1 @trunc_v8i32_v8i1(<8 x i32>) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_v8i32_v8i1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    setb %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v8i32_v8i1:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    setb %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v8i32_v8i1:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    setb %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v8i32_v8i1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512VL-NEXT:    setb %al
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a = trunc <8 x i32> %0 to <8 x i1>
   %b = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> %a)
   ret i1 %b
@@ -310,13 +264,28 @@ define i1 @trunc_v16i16_v16i1(<16 x i16>) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_v16i16_v16i1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    setb %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v16i16_v16i1:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    setb %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v16i16_v16i1:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    setb %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v16i16_v16i1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512VL-NEXT:    setb %al
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a = trunc <16 x i16> %0 to <16 x i1>
   %b = call i1 @llvm.vector.reduce.and.v16i1(<16 x i1> %a)
   ret i1 %b
@@ -354,13 +323,28 @@ define i1 @trunc_v32i8_v32i1(<32 x i8>) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_v32i8_v32i1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    setb %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v32i8_v32i1:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    setb %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v32i8_v32i1:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    setb %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v32i8_v32i1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512VL-NEXT:    setb %al
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a = trunc <32 x i8> %0 to <32 x i1>
   %b = call i1 @llvm.vector.reduce.and.v32i1(<32 x i1> %a)
   ret i1 %b
diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
index f80544fdef7e6..39798be110674 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
@@ -27,30 +27,11 @@ define i1 @trunc_v2i64_v2i1(<2 x i64>) nounwind {
 ; SSE41-NEXT:    setne %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v2i64_v2i1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    setne %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v2i64_v2i1:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    setne %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v2i64_v2i1:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    setne %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512VL-LABEL: trunc_v2i64_v2i1:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [1,1]
-; AVX512VL-NEXT:    vptest %xmm1, %xmm0
-; AVX512VL-NEXT:    setne %al
-; AVX512VL-NEXT:    retq
+; AVX-LABEL: trunc_v2i64_v2i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setne %al
+; AVX-NEXT:    retq
   %a = trunc <2 x i64> %0 to <2 x i1>
   %b = call i1 @llvm.vector.reduce.or.v2i1(<2 x i1> %a)
   ret i1 %b
@@ -71,30 +52,11 @@ define i1 @trunc_v4i32_v4i1(<4 x i32>) nounwind {
 ; SSE41-NEXT:    setne %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v4i32_v4i1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    setne %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v4i32_v4i1:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    setne %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v4i32_v4i1:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    setne %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512VL-LABEL: trunc_v4i32_v4i1:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [4294967297,4294967297]
-; AVX512VL-NEXT:    vptest %xmm1, %xmm0
-; AVX512VL-NEXT:    setne %al
-; AVX512VL-NEXT:    retq
+; AVX-LABEL: trunc_v4i32_v4i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setne %al
+; AVX-NEXT:    retq
   %a = trunc <4 x i32> %0 to <4 x i1>
   %b = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %a)
   ret i1 %b
@@ -115,30 +77,11 @@ define i1 @trunc_v8i16_v8i1(<8 x i16>) nounwind {
 ; SSE41-NEXT:    setne %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v8i16_v8i1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    setne %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v8i16_v8i1:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    setne %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v8i16_v8i1:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    setne %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512VL-LABEL: trunc_v8i16_v8i1:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [281479271743489,281479271743489]
-; AVX512VL-NEXT:    vptest %xmm1, %xmm0
-; AVX512VL-NEXT:    setne %al
-; AVX512VL-NEXT:    retq
+; AVX-LABEL: trunc_v8i16_v8i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setne %al
+; AVX-NEXT:    retq
   %a = trunc <8 x i16> %0 to <8 x i1>
   %b = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> %a)
   ret i1 %b
@@ -159,30 +102,11 @@ define i1 @trunc_v16i8_v16i1(<16 x i8>) nounwind {
 ; SSE41-NEXT:    setne %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v16i8_v16i1:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    setne %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v16i8_v16i1:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    setne %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v16i8_v16i1:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    setne %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512VL-LABEL: trunc_v16i8_v16i1:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [72340172838076673,72340172838076673]
-; AVX512VL-NEXT:    vptest %xmm1, %xmm0
-; AVX512VL-NEXT:    setne %al
-; AVX512VL-NEXT:    retq
+; AVX-LABEL: trunc_v16i8_v16i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    setne %al
+; AVX-NEXT:    retq
   %a = trunc <16 x i8> %0 to <16 x i1>
   %b = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> %a)
   ret i1 %b
@@ -220,13 +144,28 @@ define i1 @trunc_v4i64_v4i1(<4 x i64>) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_v4i64_v4i1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    setne %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v4i64_v4i1:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    setne %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v4i64_v4i1:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    setne %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v4i64_v4i1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512VL-NEXT:    setne %al
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a = trunc <4 x i64> %0 to <4 x i1>
   %b = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %a)
   ret i1 %b
@@ -264,13 +203,28 @@ define i1 @trunc_v8i32_v8i1(<8 x i32>) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_v8i32_v8i1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    setne %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v8i32_v8i1:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    setne %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v8i32_v8i1:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967297,4294967297,4294967297,4294967297]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    setne %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v8i32_v8i1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512VL-NEXT:    setne %al
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a = trunc <8 x i32> %0 to <8 x i1>
   %b = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> %a)
   ret i1 %b
@@ -308,13 +262,28 @@ define i1 @trunc_v16i16_v16i1(<16 x i16>) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_v16i16_v16i1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    setne %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v16i16_v16i1:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    setne %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v16i16_v16i1:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [281479271743489,281479271743489,281479271743489,281479271743489]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    setne %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v16i16_v16i1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512VL-NEXT:    setne %al
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a = trunc <16 x i16> %0 to <16 x i1>
   %b = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> %a)
   ret i1 %b
@@ -352,13 +321,28 @@ define i1 @trunc_v32i8_v32i1(<32 x i8>) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_v32i8_v32i1:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    setne %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v32i8_v32i1:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    setne %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v32i8_v32i1:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [72340172838076673,72340172838076673,72340172838076673,72340172838076673]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    setne %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v32i8_v32i1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512VL-NEXT:    setne %al
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a = trunc <32 x i8> %0 to <32 x i1>
   %b = call i1 @llvm.vector.reduce.or.v32i1(<32 x i1> %a)
   ret i1 %b
@@ -429,8 +413,7 @@ define i1 @trunc_v8i64_v8i1(<8 x i64>) nounwind {
 ;
 ; AVX512-LABEL: trunc_v8i64_v8i1:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1]
-; AVX512-NEXT:    vptestmd %zmm1, %zmm0, %k0
+; AVX512-NEXT:    vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %k0
 ; AVX512-NEXT:    kortestw %k0, %k0
 ; AVX512-NEXT:    setne %al
 ; AVX512-NEXT:    vzeroupper
@@ -2500,5 +2483,3 @@ declare i1 @llvm.vector.reduce.or.v8i1(<8 x i1>)
 declare i1 @llvm.vector.reduce.or.v16i1(<16 x i1>)
 declare i1 @llvm.vector.reduce.or.v32i1(<32 x i1>)
 declare i1 @llvm.vector.reduce.or.v64i1(<64 x i1>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; AVX: {{.*}}
diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
index f8ba00b033299..d1112f9b4b699 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
@@ -829,30 +829,11 @@ define i1 @trunc_v2i64(<2 x i64> %a0) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: trunc_v2i64:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: trunc_v2i64:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    sete %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: trunc_v2i64:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    sete %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512BWVL-LABEL: trunc_v2i64:
-; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [65535,65535]
-; AVX512BWVL-NEXT:    vptest %xmm1, %xmm0
-; AVX512BWVL-NEXT:    sete %al
-; AVX512BWVL-NEXT:    retq
+; AVX-LABEL: trunc_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %1 = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %a0)
   %2 = trunc i64 %1 to i16
   %3 = icmp eq i16 %2, 0
@@ -890,13 +871,28 @@ define i1 @mask_v8i32(<8 x i32> %a0) {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: mask_v8i32:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456]
-; AVX512-NEXT:    vptest %ymm1, %ymm0
-; AVX512-NEXT:    sete %al
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: mask_v8i32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456]
+; AVX512F-NEXT:    vptest %ymm1, %ymm0
+; AVX512F-NEXT:    sete %al
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: mask_v8i32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456]
+; AVX512BW-NEXT:    vptest %ymm1, %ymm0
+; AVX512BW-NEXT:    sete %al
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512BWVL-LABEL: mask_v8i32:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
+; AVX512BWVL-NEXT:    sete %al
+; AVX512BWVL-NEXT:    vzeroupper
+; AVX512BWVL-NEXT:    retq
   %1 = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %a0)
   %2 = and i32 %1, 2147483648
   %3 = icmp eq i32 %2, 0
@@ -1018,34 +1014,12 @@ define zeroext i1 @PR44781(ptr %0) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1OR2-LABEL: PR44781:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovdqu (%rdi), %xmm0
-; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1OR2-NEXT:    sete %al
-; AVX1OR2-NEXT:    retq
-;
-; AVX512F-LABEL: PR44781:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovdqu (%rdi), %xmm0
-; AVX512F-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512F-NEXT:    sete %al
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: PR44781:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vmovdqu (%rdi), %xmm0
-; AVX512BW-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX512BW-NEXT:    sete %al
-; AVX512BW-NEXT:    retq
-;
-; AVX512BWVL-LABEL: PR44781:
-; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vmovdqu (%rdi), %xmm0
-; AVX512BWVL-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [64424509455,64424509455]
-; AVX512BWVL-NEXT:    vptest %xmm1, %xmm0
-; AVX512BWVL-NEXT:    sete %al
-; AVX512BWVL-NEXT:    retq
+; AVX-LABEL: PR44781:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqu (%rdi), %xmm0
+; AVX-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX-NEXT:    sete %al
+; AVX-NEXT:    retq
   %2 = load <4 x i32>, ptr %0, align 4
   %3 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %2)
   %4 = and i32 %3, 15
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index c55335f849569..a4785298e370c 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -688,7 +688,7 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
 ;
 ; AVX512VL-LABEL: splatvar_rotate_v16i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpandn %xmm2, %xmm1, %xmm3
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpsrlw %xmm3, %ymm4, %ymm3
@@ -710,7 +710,7 @@ define <16 x i16> @splatvar_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
 ;
 ; AVX512VLBW-LABEL: splatvar_rotate_v16i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpandn %xmm2, %xmm1, %xmm3
 ; AVX512VLBW-NEXT:    vpsrlw $1, %ymm0, %ymm4
 ; AVX512VLBW-NEXT:    vpsrlw %xmm3, %ymm4, %ymm3
diff --git a/llvm/test/CodeGen/X86/vector-rotate-512.ll b/llvm/test/CodeGen/X86/vector-rotate-512.ll
index 29afbf4c62ef5..dbeeb25a37453 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-512.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-512.ll
@@ -330,7 +330,7 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
 ;
 ; AVX512VL-LABEL: splatvar_rotate_v32i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VL-NEXT:    vpandn %xmm2, %xmm1, %xmm3
 ; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm4, %ymm5
@@ -358,7 +358,7 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
 ;
 ; AVX512VLBW-LABEL: splatvar_rotate_v32i16:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [15,0,0,0,15,0,0,0]
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm2 = [15,0,0,0]
 ; AVX512VLBW-NEXT:    vpandn %xmm2, %xmm1, %xmm3
 ; AVX512VLBW-NEXT:    vpsrlw $1, %zmm0, %zmm4
 ; AVX512VLBW-NEXT:    vpsrlw %xmm3, %zmm4, %zmm3
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
index 30b743cb7bdfd..a467063fb509d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -1013,9 +1013,9 @@ define <8 x float> @shuffle_v8f32_32103210(<8 x float> %a, <8 x float> %b) {
 ;
 ; AVX512VL-FAST-ALL-LABEL: shuffle_v8f32_32103210:
 ; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [3,2,1,0,3,2,1,0]
+; AVX512VL-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [3,2,1,0,3,2,1,0]
 ; AVX512VL-FAST-ALL-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512VL-FAST-ALL-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX512VL-FAST-ALL-NEXT:    vpermps %ymm0, %ymm1, %ymm0
 ; AVX512VL-FAST-ALL-NEXT:    retq
 ;
 ; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8f32_32103210:
@@ -1061,9 +1061,9 @@ define <8 x float> @shuffle_v8f32_76547654(<8 x float> %a, <8 x float> %b) {
 ;
 ; AVX512VL-FAST-ALL-LABEL: shuffle_v8f32_76547654:
 ; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
+; AVX512VL-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
 ; AVX512VL-FAST-ALL-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512VL-FAST-ALL-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX512VL-FAST-ALL-NEXT:    vpermps %ymm0, %ymm1, %ymm0
 ; AVX512VL-FAST-ALL-NEXT:    retq
 ;
 ; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8f32_76547654:
@@ -2686,9 +2686,9 @@ define <8 x i32> @shuffle_v8i32_32103210(<8 x i32> %a, <8 x i32> %b) {
 ;
 ; AVX512VL-FAST-ALL-LABEL: shuffle_v8i32_32103210:
 ; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [3,2,1,0,3,2,1,0]
+; AVX512VL-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [3,2,1,0,3,2,1,0]
 ; AVX512VL-FAST-ALL-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512VL-FAST-ALL-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX512VL-FAST-ALL-NEXT:    vpermps %ymm0, %ymm1, %ymm0
 ; AVX512VL-FAST-ALL-NEXT:    retq
 ;
 ; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8i32_32103210:
@@ -2734,9 +2734,9 @@ define <8 x i32> @shuffle_v8i32_76547654(<8 x i32> %a, <8 x i32> %b) {
 ;
 ; AVX512VL-FAST-ALL-LABEL: shuffle_v8i32_76547654:
 ; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
+; AVX512VL-FAST-ALL-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
 ; AVX512VL-FAST-ALL-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512VL-FAST-ALL-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX512VL-FAST-ALL-NEXT:    vpermps %ymm0, %ymm1, %ymm0
 ; AVX512VL-FAST-ALL-NEXT:    retq
 ;
 ; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8i32_76547654:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
index 2bb7dd90cc05f..9f8852e5cd668 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -202,14 +202,14 @@ define <8 x double> @shuffle_v8f64_81a3c5e7(<8 x double> %a, <8 x double> %b) {
 define <8 x double> @shuffle_v8f64_08080808(<8 x double> %a, <8 x double> %b) {
 ; AVX512F-LABEL: shuffle_v8f64_08080808:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
+; AVX512F-NEXT:    vbroadcastf32x4 {{.*#+}} zmm2 = [0,8,0,8,0,8,0,8]
 ; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512F-32-LABEL: shuffle_v8f64_08080808:
 ; AVX512F-32:       # %bb.0:
-; AVX512F-32-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,0,8,0,0,0,8,0,0,0,8,0,0,0,8,0]
+; AVX512F-32-NEXT:    vbroadcastf32x4 {{.*#+}} zmm2 = [0,0,8,0,0,0,8,0,0,0,8,0,0,0,8,0]
 ; AVX512F-32-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-32-NEXT:    vpermt2pd %zmm1, %zmm2, %zmm0
 ; AVX512F-32-NEXT:    retl
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
index 8cc20ec3c1a7e..b00deab4df873 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -335,12 +335,15 @@ define <64 x i8> @test_mm512_mask_blend_epi8(<64 x i8> %A, <64 x i8> %W){
 ; X64-AVX512-NEXT:    vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
 ; X64-AVX512-NEXT:    retq
 ;
-; AVX512F-LABEL: test_mm512_mask_blend_epi8:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    vpbroadcastw {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm2
-; AVX512F-NEXT:    vpternlogq $216, %zmm2, %zmm1, %zmm0
-; AVX512F-NEXT:    ret{{[l|q]}}
+; X86-AVX512F-LABEL: test_mm512_mask_blend_epi8:
+; X86-AVX512F:       # %bb.0: # %entry
+; X86-AVX512F-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm1, %zmm0
+; X86-AVX512F-NEXT:    retl
+;
+; X64-AVX512F-LABEL: test_mm512_mask_blend_epi8:
+; X64-AVX512F:       # %bb.0: # %entry
+; X64-AVX512F-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; X64-AVX512F-NEXT:    retq
 entry:
   %0 = shufflevector <64 x i8> %A, <64 x i8> %W, <64 x i32>  <i32 64, i32 1, i32 66, i32 3, i32 68, i32 5, i32 70, i32 7, i32 72, i32 9, i32 74, i32 11, i32 76, i32 13, i32 78, i32 15, i32 80, i32 17, i32 82, i32 19, i32 84, i32 21, i32 86, i32 23, i32 88, i32 25, i32 90, i32 27, i32 92, i32 29, i32 94, i32 31, i32 96, i32 33, i32 98, i32 35, i32 100, i32 37, i32 102, i32 39, i32 104, i32 41, i32 106, i32 43, i32 108, i32 45, i32 110, i32 47, i32 112, i32 49, i32 114, i32 51, i32 116, i32 53, i32 118, i32 55, i32 120, i32 57, i32 122, i32 59, i32 124, i32 61, i32 126, i32 63>
   ret <64 x i8> %0
@@ -658,7 +661,7 @@ define <32 x float> @PR47534(<8 x float> %tmp) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [7,25,26,27,7,29,30,31,7,25,26,27,7,29,30,31]
+; CHECK-NEXT:    vbroadcastf64x4 {{.*#+}} zmm1 = [7,25,26,27,7,29,30,31,7,25,26,27,7,29,30,31]
 ; CHECK-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; CHECK-NEXT:    vpermi2ps %zmm2, %zmm0, %zmm1
 ; CHECK-NEXT:    ret{{[l|q]}}
diff --git a/llvm/test/CodeGen/X86/widen_bitcnt.ll b/llvm/test/CodeGen/X86/widen_bitcnt.ll
index da468b6d809e8..b64d401e4f57c 100644
--- a/llvm/test/CodeGen/X86/widen_bitcnt.ll
+++ b/llvm/test/CodeGen/X86/widen_bitcnt.ll
@@ -1177,12 +1177,12 @@ define <4 x i32> @widen_cttz_v2i32_v4i32(<2 x i32> %a0, <2 x i32> %a1) {
 ; AVX512VL-NEXT:    vpaddd %xmm2, %xmm0, %xmm3
 ; AVX512VL-NEXT:    vpandn %xmm3, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vplzcntd %xmm0, %xmm0
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
 ; AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm2
 ; AVX512VL-NEXT:    vpandn %xmm2, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vplzcntd %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VL-NEXT:    vpsubd %xmm0, %xmm3, %xmm0
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512VPOPCNT-LABEL: widen_cttz_v2i32_v4i32:
@@ -1422,7 +1422,8 @@ define <8 x i32> @widen_cttz_v2i32_v8i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>
 ; AVX512VL-NEXT:    vpandn %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vplzcntd %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [32,32,32,32,32,32,32,32]
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6.7903865326698815E-313,6.7903865326698815E-313,6.7903865326698815E-313,6.7903865326698815E-313]
+; AVX512VL-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX512VL-NEXT:    vpsubd %ymm0, %ymm1, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
@@ -1526,12 +1527,12 @@ define <4 x i32> @widen_cttz_undef_v2i32_v4i32(<2 x i32> %a0, <2 x i32> %a1) {
 ; AVX512VL-NEXT:    vpaddd %xmm2, %xmm0, %xmm3
 ; AVX512VL-NEXT:    vpandn %xmm3, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vplzcntd %xmm0, %xmm0
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
 ; AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm2
 ; AVX512VL-NEXT:    vpandn %xmm2, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vplzcntd %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VL-NEXT:    vpsubd %xmm0, %xmm3, %xmm0
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [32,32,32,32]
+; AVX512VL-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512VPOPCNT-LABEL: widen_cttz_undef_v2i32_v4i32:
@@ -1771,7 +1772,8 @@ define <8 x i32> @widen_cttz_undef_v2i32_v8i32(<2 x i32> %a0, <2 x i32> %a1, <2
 ; AVX512VL-NEXT:    vpandn %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vplzcntd %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [32,32,32,32,32,32,32,32]
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6.7903865326698815E-313,6.7903865326698815E-313,6.7903865326698815E-313,6.7903865326698815E-313]
+; AVX512VL-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX512VL-NEXT:    vpsubd %ymm0, %ymm1, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
index 68f2ed4368044..0dcdd2500a4d7 100644
--- a/llvm/test/CodeGen/X86/widen_fadd.ll
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -259,13 +259,14 @@ define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ; AVX512F-NEXT:    vaddps %xmm7, %xmm8, %xmm7
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512F-NEXT:    vbroadcastf64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
 ; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2pd %zmm6, %zmm5, %zmm4
+; AVX512F-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vpermt2pd %zmm2, %zmm5, %zmm0
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovapd {{.*#+}} ymm1 = [0,2,8,10]
+; AVX512F-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
 ; AVX512F-NEXT:    vmovupd %zmm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -298,7 +299,7 @@ define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ; AVX512VL-NEXT:    vaddps %xmm7, %xmm8, %xmm7
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
-; AVX512VL-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512VL-NEXT:    vbroadcastf64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
 ; AVX512VL-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512VL-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
diff --git a/llvm/test/CodeGen/X86/widen_fdiv.ll b/llvm/test/CodeGen/X86/widen_fdiv.ll
index e4c9278478a5b..f571e6d83e621 100644
--- a/llvm/test/CodeGen/X86/widen_fdiv.ll
+++ b/llvm/test/CodeGen/X86/widen_fdiv.ll
@@ -207,13 +207,14 @@ define void @widen_fdiv_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ; AVX512F-NEXT:    vdivps %xmm8, %xmm7, %xmm7
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512F-NEXT:    vbroadcastf64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
 ; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2pd %zmm6, %zmm5, %zmm4
+; AVX512F-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vpermt2pd %zmm2, %zmm5, %zmm0
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovapd {{.*#+}} ymm1 = [0,2,8,10]
+; AVX512F-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
 ; AVX512F-NEXT:    vmovupd %zmm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -246,7 +247,7 @@ define void @widen_fdiv_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ; AVX512VL-NEXT:    vdivps %xmm8, %xmm7, %xmm7
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
-; AVX512VL-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512VL-NEXT:    vbroadcastf64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
 ; AVX512VL-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512VL-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
diff --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
index ac208da9ee11a..9fe6ece3e444d 100644
--- a/llvm/test/CodeGen/X86/widen_fmul.ll
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -259,13 +259,14 @@ define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ; AVX512F-NEXT:    vmulps %xmm7, %xmm8, %xmm7
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512F-NEXT:    vbroadcastf64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
 ; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2pd %zmm6, %zmm5, %zmm4
+; AVX512F-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vpermt2pd %zmm2, %zmm5, %zmm0
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovapd {{.*#+}} ymm1 = [0,2,8,10]
+; AVX512F-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
 ; AVX512F-NEXT:    vmovupd %zmm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -298,7 +299,7 @@ define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ; AVX512VL-NEXT:    vmulps %xmm7, %xmm8, %xmm7
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
-; AVX512VL-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512VL-NEXT:    vbroadcastf64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
 ; AVX512VL-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512VL-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
diff --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
index 90cf455ba61fc..f14efc3efaf7f 100644
--- a/llvm/test/CodeGen/X86/widen_fsub.ll
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -259,13 +259,14 @@ define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ; AVX512F-NEXT:    vsubps %xmm8, %xmm7, %xmm7
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512F-NEXT:    vbroadcastf64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
 ; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpermt2pd %zmm6, %zmm5, %zmm4
+; AVX512F-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
 ; AVX512F-NEXT:    vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vpermt2pd %zmm2, %zmm5, %zmm0
-; AVX512F-NEXT:    vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT:    vmovapd {{.*#+}} ymm1 = [0,2,8,10]
+; AVX512F-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1
+; AVX512F-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
 ; AVX512F-NEXT:    vmovupd %zmm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -298,7 +299,7 @@ define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
 ; AVX512VL-NEXT:    vsubps %xmm8, %xmm7, %xmm7
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
-; AVX512VL-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512VL-NEXT:    vbroadcastf64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
 ; AVX512VL-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512VL-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
 ; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
index 8d6c1483d817f..1fb0ec8f7c5b1 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
@@ -6105,8 +6105,8 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512F-SLOW-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-SLOW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512F-SLOW-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
@@ -6124,8 +6124,8 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512F-FAST-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
-; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512F-FAST-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
@@ -6144,8 +6144,8 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW-SLOW-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
-; AVX512BW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512BW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512BW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-SLOW-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
@@ -6160,8 +6160,8 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW-FAST-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
-; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512BW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512BW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 86e878261dcce..45839fce5e602 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -1482,10 +1482,8 @@ define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %i
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpternlogq $202, %ymm0, %ymm1, %ymm2
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm0
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -1497,10 +1495,8 @@ define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %i
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vpternlogq $202, %ymm0, %ymm1, %ymm2
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm0
+; AVX512DQ-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index b88e2921484d9..f1d5e7de95a71 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1213,10 +1213,9 @@ define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %i
 ;
 ; AVX512F-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-NEXT:    vpternlogq $172, 32(%rdi), %ymm0, %ymm1
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
 ; AVX512F-NEXT:    vpaddb (%rsi), %ymm1, %ymm0
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
@@ -1224,10 +1223,9 @@ define void @vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2(ptr %i
 ;
 ; AVX512DQ-LABEL: vec256_i8_widen_to_i128_factor16_broadcast_to_v2i128_factor2:
 ; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vpternlogq $172, 32(%rdi), %ymm0, %ymm1
+; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX512DQ-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
 ; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm1, %ymm0
 ; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rdx)
 ; AVX512DQ-NEXT:    vzeroupper