[llvm] [X86] Reuse X86ISD::SUBV_BROADCAST_LOAD for subvector loads across chains (PR #142381)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 3 01:08:20 PDT 2025


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/142381

From ed03bd86e44f6e5bcdc656b3d18c270b24917be6 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Mon, 2 Jun 2025 13:57:31 +0100
Subject: [PATCH] [X86] Reuse X86ISD::SUBV_BROADCAST_LOAD for subvector loads
 across chains

Improve handling of folding a (small) vector load that is also loaded as an X86ISD::SUBV_BROADCAST_LOAD node to just (freely) extract the bottom subvector. Similar to #139575, we should be checking that the SUBV_BROADCAST_LOAD has uses of the loaded value to ensure it's actually used, not that its out chain is empty. We must also call makeEquivalentMemoryOrdering so the out chains are correctly merged, handling any aliasing with later loads/stores.
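
For reference, a minimal sketch of the resulting combineLoad logic (reassembled from the X86ISelLowering.cpp hunk below; the enclosing walk over the chain's users, the DCI combiner handle and the final CombineTo are paraphrased surrounding context, not part of this diff):

  // N is the small LoadSDNode being combined; User is another memory node
  // on the same chain that subvector-broadcasts from the same address.
  if (User != N && User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
      UserLd->getChain() == Chain && UserLd->getBasePtr() == Ptr &&
      UserLd->getMemoryVT().getSizeInBits() == MemVT.getSizeInBits() &&
      User->hasAnyUseOfValue(0) && // loaded value must be live (the old
                                   // check required an unused out chain)
      User->getValueSizeInBits(0).getFixedValue() >
          RegVT.getFixedSizeInBits()) {
    // Merge the out chains so later loads/stores stay ordered against both.
    DAG.makeEquivalentMemoryOrdering(SDValue(N, 1), SDValue(User, 1));
    // The bottom subvector of the broadcast is exactly the loaded value.
    SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, dl,
                                       RegVT.getSizeInBits());
    Extract = DAG.getBitcast(RegVT, Extract);
    return DCI.CombineTo(N, Extract, SDValue(N, 1));
  }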

This PR is a little messy, as it bundles two other interdependent changes to avoid regressions: now that we're properly merging subvector loads, we can drop the one-use limit on the "vperm2x128(load(p),undef) -> broadcast128(p+offset)" and "insert_subvector(load256(p),load128(p),0) -> broadcast128(p)" folds.
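
As a concrete example of the vperm2x128 fold, in the splat2_i64 test below the upper-half splat previously required a cross-lane shuffle of the full 256-bit load:

  vperm2f128 $51, (%rdi), %ymm0, %ymm0 # ymm0 = mem[2,3,2,3]

and with the one-use limit dropped it becomes a single 128-bit broadcast load from the offset pointer:

  vbroadcastf128 16(%rdi), %ymm1 # ymm1 = mem[0,1,0,1]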
---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |    9 +-
 ...d_vector_inreg_of_broadcast_from_memory.ll |    2 +-
 llvm/test/CodeGen/X86/oddshuffles.ll          |   94 +-
 llvm/test/CodeGen/X86/vector-interleave.ll    |    8 +-
 .../vector-interleaved-store-i32-stride-3.ll  |  597 +++--
 .../vector-interleaved-store-i32-stride-5.ll  | 1361 +++++-----
 .../vector-interleaved-store-i32-stride-6.ll  | 2187 ++++++++---------
 .../vector-interleaved-store-i32-stride-7.ll  |  173 +-
 .../vector-interleaved-store-i64-stride-2.ll  |  134 +-
 .../vector-interleaved-store-i8-stride-6.ll   | 1096 ++++-----
 .../vector-interleaved-store-i8-stride-7.ll   | 1746 +++++++------
 .../CodeGen/X86/x86-interleaved-access.ll     |    8 +-
 ...d_vector_inreg_of_broadcast_from_memory.ll |   71 +-
 13 files changed, 3642 insertions(+), 3844 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 71c699a318eb1..69db9c13ce753 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -42672,7 +42672,7 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
         bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, LHS);
         bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, LHS);
         if ((SplatLo || SplatHi) && !Subtarget.hasAVX512() &&
-            X86::mayFoldLoad(LHS, Subtarget)) {
+            X86::mayFoldLoad(LHS, Subtarget, /*AssumeSingleUse=*/true)) {
           MVT MemVT = VT.getHalfNumVectorElementsVT();
           unsigned Ofs = SplatLo ? 0 : MemVT.getStoreSize();
           return getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, DL, VT, MemVT,
@@ -53143,9 +53143,10 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
           User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
           UserLd->getChain() == Chain && UserLd->getBasePtr() == Ptr &&
           UserLd->getMemoryVT().getSizeInBits() == MemVT.getSizeInBits() &&
-          !User->hasAnyUseOfValue(1) &&
+          User->hasAnyUseOfValue(0) &&
           User->getValueSizeInBits(0).getFixedValue() >
               RegVT.getFixedSizeInBits()) {
+        DAG.makeEquivalentMemoryOrdering(SDValue(N, 1), SDValue(User, 1));
         SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, dl,
                                            RegVT.getSizeInBits());
         Extract = DAG.getBitcast(RegVT, Extract);
@@ -59441,10 +59442,8 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
   // If we're splatting the lower half subvector of a full vector load into the
   // upper half, just splat the subvector directly, potentially creating a
   // subvector broadcast.
-  // TODO: Drop hasOneUse checks.
   if ((int)IdxVal == (VecNumElts / 2) &&
-      Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits()) &&
-      (Vec.hasOneUse() || SubVec.hasOneUse())) {
+      Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits())) {
     auto *VecLd = dyn_cast<LoadSDNode>(Vec);
     auto *SubLd = dyn_cast<LoadSDNode>(SubVec);
     if (VecLd && SubLd &&
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index f2e4da0ac5400..7fcca526e460c 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -3367,7 +3367,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,0],ymm0[1,3],ymm1[4,4],ymm0[5,7]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = mem[0,1,0,1]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = xmm1[0,0]
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX-NEXT:    vpaddb 16(%rsi), %xmm2, %xmm2
 ; AVX-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 8fd8e0e8120c1..edc8404993996 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -1683,33 +1683,32 @@ define void @interleave_24i32_in(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
 ;
 ; AVX1-LABEL: interleave_24i32_in:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovupd (%rcx), %ymm0
-; AVX1-NEXT:    vmovups (%rdx), %xmm1
-; AVX1-NEXT:    vmovups 16(%rdx), %xmm2
-; AVX1-NEXT:    vmovups (%rsi), %xmm3
-; AVX1-NEXT:    vmovups 16(%rsi), %xmm4
-; AVX1-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm2[3,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[0,2]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,2,3]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[1,1],xmm4[0,2]
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
+; AVX1-NEXT:    vmovups (%rdx), %xmm0
+; AVX1-NEXT:    vmovups 16(%rdx), %xmm1
+; AVX1-NEXT:    vmovups (%rsi), %xmm2
+; AVX1-NEXT:    vmovups 16(%rsi), %xmm3
+; AVX1-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[3,3],xmm1[3,3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT:    vbroadcastsd (%rcx), %ymm3
-; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
-; AVX1-NEXT:    vmovups %ymm0, 32(%rdi)
-; AVX1-NEXT:    vmovups %ymm1, (%rdi)
-; AVX1-NEXT:    vmovups %ymm2, 64(%rdi)
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm2[1],xmm0[1]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm3 = xmm0[1,1],xmm3[0,2]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastsd (%rcx), %ymm2
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm2 = mem[0,0,3,3,4,4,7,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,0,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
+; AVX1-NEXT:    vmovups %ymm2, 32(%rdi)
+; AVX1-NEXT:    vmovups %ymm0, (%rdi)
+; AVX1-NEXT:    vmovups %ymm1, 64(%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -1804,30 +1803,29 @@ define void @interleave_24i32_in(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
 ; XOP-NEXT:    vmovups (%rsi), %ymm0
 ; XOP-NEXT:    vmovups (%rdx), %ymm1
 ; XOP-NEXT:    vpermil2ps {{.*#+}} ymm0 = ymm0[u,3],ymm1[3],ymm0[u,4],ymm1[4],ymm0[u,5]
-; XOP-NEXT:    vmovups (%rcx), %ymm1
-; XOP-NEXT:    vmovups (%rdx), %xmm2
-; XOP-NEXT:    vmovups 16(%rdx), %xmm3
-; XOP-NEXT:    vmovups (%rsi), %xmm4
-; XOP-NEXT:    vmovups 16(%rsi), %xmm5
-; XOP-NEXT:    vshufps {{.*#+}} xmm6 = xmm5[3,3],xmm3[3,3]
-; XOP-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm5[1],xmm3[1]
-; XOP-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm5[0,2]
-; XOP-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; XOP-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[2,3,2,3]
-; XOP-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[0,0,3,3]
-; XOP-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6],ymm5[7]
-; XOP-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm4[1],xmm2[1]
-; XOP-NEXT:    vshufps {{.*#+}} xmm5 = xmm2[1,1],xmm5[0,2]
-; XOP-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; XOP-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,1]
+; XOP-NEXT:    vmovups (%rdx), %xmm1
+; XOP-NEXT:    vmovups 16(%rdx), %xmm2
+; XOP-NEXT:    vmovups (%rsi), %xmm3
+; XOP-NEXT:    vmovups 16(%rsi), %xmm4
+; XOP-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm2[3,3]
+; XOP-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
+; XOP-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[0,2]
 ; XOP-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; XOP-NEXT:    vbroadcastsd (%rcx), %ymm4
-; XOP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
-; XOP-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
-; XOP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
+; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = mem[0,1,0,1]
+; XOP-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
+; XOP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
+; XOP-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
+; XOP-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[1,1],xmm4[0,2]
+; XOP-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; XOP-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
+; XOP-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; XOP-NEXT:    vbroadcastsd (%rcx), %ymm3
+; XOP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
+; XOP-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2]
+; XOP-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7]
 ; XOP-NEXT:    vmovups %ymm0, 32(%rdi)
-; XOP-NEXT:    vmovups %ymm2, (%rdi)
-; XOP-NEXT:    vmovups %ymm3, 64(%rdi)
+; XOP-NEXT:    vmovups %ymm1, (%rdi)
+; XOP-NEXT:    vmovups %ymm2, 64(%rdi)
 ; XOP-NEXT:    vzeroupper
 ; XOP-NEXT:    retq
   %s1 = load <8 x i32>, ptr %q1, align 4
diff --git a/llvm/test/CodeGen/X86/vector-interleave.ll b/llvm/test/CodeGen/X86/vector-interleave.ll
index 206f7ed43fd6d..aec3140c73fb2 100644
--- a/llvm/test/CodeGen/X86/vector-interleave.ll
+++ b/llvm/test/CodeGen/X86/vector-interleave.ll
@@ -576,12 +576,12 @@ define void @splat2_i64(ptr %s, ptr %d) {
 ;
 ; AVX1-LABEL: splat2_i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vperm2f128 $51, (%rdi), %ymm0, %ymm0 # ymm0 = mem[2,3,2,3]
+; AVX1-NEXT:    vbroadcastf128 (%rdi), %ymm0 # ymm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0,0,3,3]
-; AVX1-NEXT:    vbroadcastf128 (%rdi), %ymm1 # ymm1 = mem[0,1,0,1]
+; AVX1-NEXT:    vbroadcastf128 16(%rdi), %ymm1 # ymm1 = mem[0,1,0,1]
 ; AVX1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
-; AVX1-NEXT:    vmovupd %ymm0, 32(%rsi)
-; AVX1-NEXT:    vmovupd %ymm1, (%rsi)
+; AVX1-NEXT:    vmovupd %ymm1, 32(%rsi)
+; AVX1-NEXT:    vmovupd %ymm0, (%rsi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
index ac0522a9e68f4..39230b67d380f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
@@ -458,33 +458,32 @@ define void @store_i32_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX-LABEL: store_i32_stride3_vf8:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovapd (%rdx), %ymm0
-; AVX-NEXT:    vmovaps (%rsi), %xmm1
-; AVX-NEXT:    vmovaps 16(%rsi), %xmm2
-; AVX-NEXT:    vmovaps (%rdi), %xmm3
-; AVX-NEXT:    vmovaps 16(%rdi), %xmm4
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm3[1],xmm1[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm1[1,1],xmm5[0,2]
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
-; AVX-NEXT:    vbroadcastsd (%rdx), %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm4[3,3],xmm2[3,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
-; AVX-NEXT:    vmovaps %ymm0, 32(%rcx)
-; AVX-NEXT:    vmovaps %ymm2, 64(%rcx)
-; AVX-NEXT:    vmovaps %ymm1, (%rcx)
+; AVX-NEXT:    vmovaps (%rsi), %xmm0
+; AVX-NEXT:    vmovaps 16(%rsi), %xmm1
+; AVX-NEXT:    vmovaps (%rdi), %xmm2
+; AVX-NEXT:    vmovaps 16(%rdi), %xmm3
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm2[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm0[1,1],xmm4[0,2]
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastsd (%rdx), %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm3[3,3],xmm1[3,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[0,2]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0,0,3,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm2 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
+; AVX-NEXT:    vmovaps %ymm2, 32(%rcx)
+; AVX-NEXT:    vmovaps %ymm1, 64(%rcx)
+; AVX-NEXT:    vmovaps %ymm0, (%rcx)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -780,60 +779,58 @@ define void @store_i32_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride3_vf16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovapd (%rdx), %ymm1
-; AVX-NEXT:    vmovapd 32(%rdx), %ymm0
-; AVX-NEXT:    vmovaps (%rsi), %xmm2
-; AVX-NEXT:    vmovaps 16(%rsi), %xmm3
-; AVX-NEXT:    vmovaps 32(%rsi), %xmm4
-; AVX-NEXT:    vmovaps 48(%rsi), %xmm5
-; AVX-NEXT:    vmovaps (%rdi), %xmm6
-; AVX-NEXT:    vmovaps 16(%rdi), %xmm7
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm8
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm6[1],xmm2[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm2[1,1],xmm9[0,2]
+; AVX-NEXT:    vmovaps (%rsi), %xmm0
+; AVX-NEXT:    vmovaps 16(%rsi), %xmm1
+; AVX-NEXT:    vmovaps 32(%rsi), %xmm2
+; AVX-NEXT:    vmovaps 48(%rsi), %xmm3
+; AVX-NEXT:    vmovaps (%rdi), %xmm4
+; AVX-NEXT:    vmovaps 16(%rdi), %xmm5
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm6
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm4[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm0[1,1],xmm7[0,2]
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm4[2,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastsd (%rdx), %ymm4
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
+; AVX-NEXT:    vmovaps 48(%rdi), %xmm4
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm4[3,3],xmm3[3,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm4[0,2]
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm3, %ymm3
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = mem[0,1,0,1]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm6[1],xmm2[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm2[1,1],xmm4[0,2]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm6[0]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm2, %ymm2
-; AVX-NEXT:    vbroadcastsd (%rdx), %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7]
-; AVX-NEXT:    vmovaps 48(%rdi), %xmm6
-; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm6[3,3],xmm5[3,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm6[1],xmm5[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[1,1],xmm6[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm5, %ymm5
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6],ymm6[7]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm8[1],xmm4[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm4[1,1],xmm6[0,2]
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm8[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[2,0],xmm8[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm4
-; AVX-NEXT:    vbroadcastsd 32(%rdx), %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm7[3,3],xmm3[3,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm7[1],xmm3[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm7[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm1[2,3,2,3]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5,6],ymm6[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm6 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm7 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1,2],ymm1[3],ymm6[4,5],ymm1[6],ymm6[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm6 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm7 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm6[1,2],ymm0[3],ymm6[4,5],ymm0[6],ymm6[7]
-; AVX-NEXT:    vmovaps %ymm0, 128(%rcx)
-; AVX-NEXT:    vmovaps %ymm1, 32(%rcx)
-; AVX-NEXT:    vmovaps %ymm3, 64(%rcx)
-; AVX-NEXT:    vmovaps %ymm4, 96(%rcx)
-; AVX-NEXT:    vmovaps %ymm5, 160(%rcx)
-; AVX-NEXT:    vmovaps %ymm2, (%rcx)
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX-NEXT:    vbroadcastsd 32(%rdx), %ymm4
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm5[3,3],xmm1[3,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,1],xmm5[0,2]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = mem[0,1,0,1]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3],ymm4[4],ymm1[5,6],ymm4[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm4 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm5 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm5 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm5 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm6 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm6 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX-NEXT:    vmovaps %ymm5, 128(%rcx)
+; AVX-NEXT:    vmovaps %ymm4, 32(%rcx)
+; AVX-NEXT:    vmovaps %ymm1, 64(%rcx)
+; AVX-NEXT:    vmovaps %ymm2, 96(%rcx)
+; AVX-NEXT:    vmovaps %ymm3, 160(%rcx)
+; AVX-NEXT:    vmovaps %ymm0, (%rcx)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -1378,114 +1375,110 @@ define void @store_i32_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride3_vf32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovapd (%rdx), %ymm4
-; AVX-NEXT:    vmovapd 32(%rdx), %ymm2
-; AVX-NEXT:    vmovapd 64(%rdx), %ymm3
-; AVX-NEXT:    vmovapd 96(%rdx), %ymm0
-; AVX-NEXT:    vmovaps (%rsi), %xmm1
-; AVX-NEXT:    vmovaps 16(%rsi), %xmm7
-; AVX-NEXT:    vmovaps 32(%rsi), %xmm10
-; AVX-NEXT:    vmovaps 48(%rsi), %xmm9
-; AVX-NEXT:    vmovaps (%rdi), %xmm5
-; AVX-NEXT:    vmovaps 16(%rdi), %xmm8
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm11
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm5[1],xmm1[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm1[1,1],xmm6[0,2]
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm5[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
-; AVX-NEXT:    vbroadcastsd (%rdx), %ymm5
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3,4],ymm5[5],ymm1[6,7]
-; AVX-NEXT:    vmovaps 80(%rsi), %xmm5
-; AVX-NEXT:    vmovaps 80(%rdi), %xmm6
-; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm6[3,3],xmm5[3,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm6[1],xmm5[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[1,1],xmm6[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm5, %ymm5
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm3[2,3,2,3]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6],ymm6[7]
-; AVX-NEXT:    vmovaps 64(%rsi), %xmm6
-; AVX-NEXT:    vmovaps 64(%rdi), %xmm12
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm13 = xmm12[1],xmm6[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm6[1,1],xmm13[0,2]
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm12[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[2,0],xmm12[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm6, %ymm6
-; AVX-NEXT:    vbroadcastsd 64(%rdx), %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm12[2],ymm6[3,4],ymm12[5],ymm6[6,7]
-; AVX-NEXT:    vmovaps 48(%rdi), %xmm12
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm12[3,3],xmm9[3,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm12[1],xmm9[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm9[1,1],xmm12[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm9, %ymm9
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm2[2,3,2,3]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2,3],ymm12[4],ymm9[5,6],ymm12[7]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm11[1],xmm10[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm10[1,1],xmm12[0,2]
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm10 = xmm10[0],xmm11[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm10[2,0],xmm11[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm10, %ymm10
-; AVX-NEXT:    vbroadcastsd 32(%rdx), %ymm11
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
-; AVX-NEXT:    vmovaps 112(%rsi), %xmm11
-; AVX-NEXT:    vmovaps 112(%rdi), %xmm12
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm12[3,3],xmm11[3,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm12[1],xmm11[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm11 = xmm11[1,1],xmm12[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm11, %ymm11
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0],ymm12[1],ymm11[2,3],ymm12[4],ymm11[5,6],ymm12[7]
-; AVX-NEXT:    vmovaps 96(%rsi), %xmm12
-; AVX-NEXT:    vmovaps 96(%rdi), %xmm13
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm13[1],xmm12[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm12[1,1],xmm14[0,2]
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm12[0],xmm13[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[2,0],xmm13[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm12, %ymm12
-; AVX-NEXT:    vbroadcastsd 96(%rdx), %ymm13
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7]
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm8[3,3],xmm7[3,3]
+; AVX-NEXT:    vmovaps (%rsi), %xmm0
+; AVX-NEXT:    vmovaps 16(%rsi), %xmm3
+; AVX-NEXT:    vmovaps 32(%rsi), %xmm6
+; AVX-NEXT:    vmovaps 48(%rsi), %xmm5
+; AVX-NEXT:    vmovaps (%rdi), %xmm1
+; AVX-NEXT:    vmovaps 16(%rdi), %xmm4
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm7
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastsd (%rdx), %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vmovaps 80(%rsi), %xmm1
+; AVX-NEXT:    vmovaps 80(%rdi), %xmm2
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm2[3,3],xmm1[3,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[0,2]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm1, %ymm1
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0,0,3,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX-NEXT:    vmovaps 64(%rsi), %xmm2
+; AVX-NEXT:    vmovaps 64(%rdi), %xmm8
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm8[1],xmm2[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm2[1,1],xmm9[0,2]
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm8[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm8[2,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm2, %ymm2
+; AVX-NEXT:    vbroadcastsd 64(%rdx), %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm8[2],ymm2[3,4],ymm8[5],ymm2[6,7]
+; AVX-NEXT:    vmovaps 48(%rdi), %xmm8
+; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm8[3,3],xmm5[3,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm8[1],xmm5[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[1,1],xmm8[0,2]
+; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm5, %ymm5
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[0,0,3,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm8[1],ymm5[2,3],ymm8[4],ymm5[5,6],ymm8[7]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm7[1],xmm6[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm6[1,1],xmm8[0,2]
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[2,0],xmm7[2,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm6, %ymm6
+; AVX-NEXT:    vbroadcastsd 32(%rdx), %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7]
+; AVX-NEXT:    vmovaps 112(%rsi), %xmm7
+; AVX-NEXT:    vmovaps 112(%rdi), %xmm8
+; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm8[3,3],xmm7[3,3]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm8[1],xmm7[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[1,1],xmm8[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm7, %ymm7
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm4[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm7, %ymm7
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[0,0,3,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5,6],ymm8[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm8 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2],ymm13[3,4],ymm8[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm8[1,2],ymm4[3],ymm8[4,5],ymm4[6],ymm8[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm8 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2],ymm13[3,4],ymm8[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm8[1,2],ymm3[3],ymm8[4,5],ymm3[6],ymm8[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm8 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2],ymm13[3,4],ymm8[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm8[1,2],ymm2[3],ymm8[4,5],ymm2[6],ymm8[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm8 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2],ymm13[3,4],ymm8[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1,2],ymm0[3],ymm8[4,5],ymm0[6],ymm8[7]
-; AVX-NEXT:    vmovaps %ymm0, 320(%rcx)
-; AVX-NEXT:    vmovaps %ymm2, 128(%rcx)
-; AVX-NEXT:    vmovaps %ymm3, 224(%rcx)
+; AVX-NEXT:    vmovaps 96(%rsi), %xmm8
+; AVX-NEXT:    vmovaps 96(%rdi), %xmm9
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm9[1],xmm8[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm8[1,1],xmm10[0,2]
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm8[0],xmm9[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm8[2,0],xmm9[2,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm8, %ymm8
+; AVX-NEXT:    vbroadcastsd 96(%rdx), %ymm9
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm4[3,3],xmm3[3,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm4[0,2]
+; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm3, %ymm3
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = mem[0,1,0,1]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm4 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm9 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2],ymm9[3,4],ymm4[5],ymm9[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm9 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0],ymm4[1,2],ymm9[3],ymm4[4,5],ymm9[6],ymm4[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm9 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm10 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm10 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1,2],ymm10[3],ymm9[4,5],ymm10[6],ymm9[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm10 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm11 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2],ymm11[3,4],ymm10[5],ymm11[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm11 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1,2],ymm11[3],ymm10[4,5],ymm11[6],ymm10[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm11 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm12 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2],ymm12[3,4],ymm11[5],ymm12[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm12 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7]
+; AVX-NEXT:    vmovaps %ymm11, 320(%rcx)
+; AVX-NEXT:    vmovaps %ymm10, 128(%rcx)
+; AVX-NEXT:    vmovaps %ymm9, 224(%rcx)
 ; AVX-NEXT:    vmovaps %ymm4, 32(%rcx)
-; AVX-NEXT:    vmovaps %ymm7, 64(%rcx)
-; AVX-NEXT:    vmovaps %ymm12, 288(%rcx)
-; AVX-NEXT:    vmovaps %ymm11, 352(%rcx)
-; AVX-NEXT:    vmovaps %ymm10, 96(%rcx)
-; AVX-NEXT:    vmovaps %ymm9, 160(%rcx)
-; AVX-NEXT:    vmovaps %ymm6, 192(%rcx)
-; AVX-NEXT:    vmovaps %ymm5, 256(%rcx)
-; AVX-NEXT:    vmovaps %ymm1, (%rcx)
+; AVX-NEXT:    vmovaps %ymm3, 64(%rcx)
+; AVX-NEXT:    vmovaps %ymm8, 288(%rcx)
+; AVX-NEXT:    vmovaps %ymm7, 352(%rcx)
+; AVX-NEXT:    vmovaps %ymm6, 96(%rcx)
+; AVX-NEXT:    vmovaps %ymm5, 160(%rcx)
+; AVX-NEXT:    vmovaps %ymm2, 192(%rcx)
+; AVX-NEXT:    vmovaps %ymm1, 256(%rcx)
+; AVX-NEXT:    vmovaps %ymm0, (%rcx)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -2532,11 +2525,7 @@ define void @store_i32_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride3_vf64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    subq $200, %rsp
-; AVX-NEXT:    vmovapd (%rdx), %ymm8
-; AVX-NEXT:    vmovapd 32(%rdx), %ymm9
-; AVX-NEXT:    vmovapd 64(%rdx), %ymm11
-; AVX-NEXT:    vmovapd 96(%rdx), %ymm12
+; AVX-NEXT:    subq $168, %rsp
 ; AVX-NEXT:    vmovaps (%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 16(%rsi), %xmm1
 ; AVX-NEXT:    vmovaps 32(%rsi), %xmm2
@@ -2556,7 +2545,7 @@ define void @store_i32_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm1[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[0,2]
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm8[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -2573,193 +2562,187 @@ define void @store_i32_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm0[0,2]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm9[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 64(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 64(%rdi), %xmm1
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm1[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm0[1,1],xmm4[0,2]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; AVX-NEXT:    vbroadcastsd 64(%rdx), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 80(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 80(%rdi), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm1[3,3],xmm0[3,3]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm11[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 96(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 96(%rdi), %xmm1
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm1[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm0[1,1],xmm6[0,2]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
 ; AVX-NEXT:    vbroadcastsd 96(%rdx), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 112(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 112(%rdi), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm1[3,3],xmm0[3,3]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm12[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 128(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 128(%rdi), %xmm1
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm1[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm0[1,1],xmm8[0,2]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm0
 ; AVX-NEXT:    vbroadcastsd 128(%rdx), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 144(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 144(%rdi), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm1[3,3],xmm0[3,3]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
-; AVX-NEXT:    vmovapd 128(%rdx), %ymm6
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm6[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX-NEXT:    vmovaps 160(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 160(%rdi), %xmm1
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm1[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm0[1,1],xmm10[0,2]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm0
 ; AVX-NEXT:    vbroadcastsd 160(%rdx), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovaps 176(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 176(%rdi), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm11 = xmm1[3,3],xmm0[3,3]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovapd 160(%rdx), %ymm4
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm4[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX-NEXT:    vmovaps 192(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 192(%rdi), %xmm1
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm1[1],xmm0[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm0[1,1],xmm3[0,2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm1[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm0[1,1],xmm12[0,2]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm0
 ; AVX-NEXT:    vbroadcastsd 192(%rdx), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovaps 208(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 208(%rdi), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[3,3],xmm0[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm1[3,3],xmm0[3,3]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX-NEXT:    vmovapd 192(%rdx), %ymm2
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3,2,3]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6],ymm3[7]
+; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX-NEXT:    vmovaps 224(%rsi), %xmm0
-; AVX-NEXT:    vmovaps 224(%rdi), %xmm3
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm3[1],xmm0[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm0[1,1],xmm5[0,2]
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[2,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX-NEXT:    vbroadcastsd 224(%rdx), %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
+; AVX-NEXT:    vmovaps 224(%rdi), %xmm1
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm1[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm0[1,1],xmm14[0,2]
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastsd 224(%rdx), %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovaps 240(%rsi), %xmm0
-; AVX-NEXT:    vmovaps 240(%rdi), %xmm3
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm3[3,3],xmm0[3,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm3[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vmovapd 224(%rdx), %ymm0
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0,0,3,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0],ymm1[1,2],ymm8[3],ymm1[4,5],ymm8[6],ymm1[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm1[1,2],ymm9[3],ymm1[4,5],ymm9[6],ymm1[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm11 = ymm11[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm11[0],ymm1[1,2],ymm11[3],ymm1[4,5],ymm11[6],ymm1[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm11 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm13[0,1],ymm11[2],ymm13[3,4],ymm11[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0],ymm11[1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm12[1,2],ymm6[3],ymm12[4,5],ymm6[6],ymm12[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm12[1,2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7]
-; AVX-NEXT:    vpermilps {{.*#+}} ymm12 = mem[0,0,3,3,4,4,7,7]
-; AVX-NEXT:    vpermilpd {{.*#+}} ymm13 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm12[1,2],ymm0[3],ymm12[4,5],ymm0[6],ymm12[7]
-; AVX-NEXT:    vmovaps %ymm0, 704(%rcx)
-; AVX-NEXT:    vmovaps %ymm2, 608(%rcx)
-; AVX-NEXT:    vmovaps %ymm4, 512(%rcx)
-; AVX-NEXT:    vmovaps %ymm6, 416(%rcx)
-; AVX-NEXT:    vmovaps %ymm11, 320(%rcx)
-; AVX-NEXT:    vmovaps %ymm1, 224(%rcx)
-; AVX-NEXT:    vmovaps %ymm9, 128(%rcx)
-; AVX-NEXT:    vmovaps %ymm8, 32(%rcx)
-; AVX-NEXT:    vmovaps %ymm3, 736(%rcx)
-; AVX-NEXT:    vmovaps %ymm5, 672(%rcx)
-; AVX-NEXT:    vmovaps %ymm7, 640(%rcx)
-; AVX-NEXT:    vmovaps %ymm10, 576(%rcx)
-; AVX-NEXT:    vmovaps %ymm15, 544(%rcx)
-; AVX-NEXT:    vmovaps %ymm14, 480(%rcx)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 448(%rcx)
+; AVX-NEXT:    vmovaps 240(%rdi), %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm1[3,3],xmm0[3,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[0,2]
+; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm0
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm0 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm0 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm2 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm2 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm4 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm4 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm5 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm5 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm5 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm6 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm6 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm6 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm7 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm7 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0],ymm6[1,2],ymm7[3],ymm6[4,5],ymm7[6],ymm6[7]
+; AVX-NEXT:    vpermilps {{.*#+}} ymm7 = mem[0,0,3,3,4,4,7,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
+; AVX-NEXT:    vpermilpd {{.*#+}} ymm8 = mem[1,1,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1,2],ymm8[3],ymm7[4,5],ymm8[6],ymm7[7]
+; AVX-NEXT:    vmovaps %ymm7, 704(%rcx)
+; AVX-NEXT:    vmovaps %ymm6, 608(%rcx)
+; AVX-NEXT:    vmovaps %ymm5, 512(%rcx)
+; AVX-NEXT:    vmovaps %ymm4, 416(%rcx)
+; AVX-NEXT:    vmovaps %ymm3, 320(%rcx)
+; AVX-NEXT:    vmovaps %ymm2, 224(%rcx)
+; AVX-NEXT:    vmovaps %ymm0, 128(%rcx)
+; AVX-NEXT:    vmovaps %ymm1, 32(%rcx)
+; AVX-NEXT:    vmovaps %ymm15, 736(%rcx)
+; AVX-NEXT:    vmovaps %ymm14, 672(%rcx)
+; AVX-NEXT:    vmovaps %ymm13, 640(%rcx)
+; AVX-NEXT:    vmovaps %ymm12, 576(%rcx)
+; AVX-NEXT:    vmovaps %ymm11, 544(%rcx)
+; AVX-NEXT:    vmovaps %ymm10, 480(%rcx)
+; AVX-NEXT:    vmovaps %ymm9, 448(%rcx)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 384(%rcx)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 352(%rcx)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 288(%rcx)
-; AVX-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 256(%rcx)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm0, 256(%rcx)
+; AVX-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 192(%rcx)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 160(%rcx)
@@ -2769,7 +2752,7 @@ define void @store_i32_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps %ymm0, 64(%rcx)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, (%rcx)
-; AVX-NEXT:    addq $200, %rsp
+; AVX-NEXT:    addq $168, %rsp
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
index d591b7ad59b02..0fba7de803488 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
@@ -687,64 +687,62 @@ define void @store_i32_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX-LABEL: store_i32_stride5_vf8:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovaps (%rdi), %ymm1
-; AVX-NEXT:    vmovaps (%rsi), %ymm3
-; AVX-NEXT:    vmovaps (%rdx), %ymm2
-; AVX-NEXT:    vmovaps (%rcx), %ymm4
-; AVX-NEXT:    vmovaps (%r8), %ymm0
-; AVX-NEXT:    vmovaps (%rsi), %xmm6
-; AVX-NEXT:    vmovaps 16(%rsi), %xmm7
-; AVX-NEXT:    vmovaps (%rdi), %xmm8
-; AVX-NEXT:    vmovaps 16(%rdi), %xmm9
-; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,xmm8[1],xmm6[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm10 = xmm8[0],xmm6[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm10, %ymm5
-; AVX-NEXT:    vmovaps (%rdx), %xmm11
-; AVX-NEXT:    vmovaps (%rcx), %xmm12
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm13 = xmm12[0],xmm11[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm13[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 4(%rdx), %xmm14
-; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm13, %ymm13
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm13[2,3],ymm5[4,5,6],ymm13[7]
-; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm10, %ymm10
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm10[0],ymm5[1,2,3],ymm10[4],ymm5[5,6,7]
-; AVX-NEXT:    vinsertps {{.*#+}} xmm10 = zero,zero,xmm9[2],xmm7[2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm9[1,1],xmm7[1,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm7, %ymm7
-; AVX-NEXT:    vshufps {{.*#+}} ymm9 = ymm4[1,1],ymm2[1,1],ymm4[5,5],ymm2[5,5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm9[3,4],ymm7[5,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm0[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0],ymm7[1,2,3,4],ymm9[5],ymm7[6,7]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm10 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
-; AVX-NEXT:    vbroadcastss 4(%rcx), %xmm11
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm11, %ymm10
-; AVX-NEXT:    vinsertps {{.*#+}} xmm6 = zero,zero,xmm8[2],xmm6[2]
+; AVX-NEXT:    vmovaps (%rdi), %ymm0
+; AVX-NEXT:    vmovaps (%rsi), %ymm2
+; AVX-NEXT:    vmovaps (%rdx), %ymm1
+; AVX-NEXT:    vmovaps (%rcx), %ymm3
+; AVX-NEXT:    vmovaps (%rsi), %xmm5
+; AVX-NEXT:    vmovaps (%rdi), %xmm6
+; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm6[1],xmm5[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
+; AVX-NEXT:    vmovaps (%rdx), %xmm7
+; AVX-NEXT:    vmovaps (%rcx), %xmm8
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm8[0],xmm7[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm9[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 4(%rdx), %xmm10
+; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm9[2,3],ymm4[4,5,6],ymm9[7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm9[4],ymm4[5,6,7]
+; AVX-NEXT:    vmovaps 16(%rsi), %xmm10
+; AVX-NEXT:    vmovaps 16(%rdi), %xmm11
+; AVX-NEXT:    vinsertps {{.*#+}} xmm12 = zero,zero,xmm11[2],xmm10[2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm11[1,1],xmm10[1,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm10, %ymm10
+; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm3[1,1],ymm1[1,1],ymm3[5,5],ymm1[5,5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3,4],ymm10[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm11 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0],ymm10[1,2,3,4],ymm11[5],ymm10[6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; AVX-NEXT:    vbroadcastss 4(%rcx), %xmm8
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm8, %ymm7
+; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,zero,xmm6[2],xmm5[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm9[1],ymm5[2,3,4,5],ymm9[6],ymm5[7]
+; AVX-NEXT:    vmovaps 16(%rcx), %xmm6
+; AVX-NEXT:    vmovaps 16(%rdx), %xmm7
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm7[3,3],xmm6[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm6, %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm10[0,1],ymm6[2,3],ymm10[4,5],ymm6[6,7]
-; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2,3,4,5],ymm8[6],ymm6[7]
-; AVX-NEXT:    vmovaps 16(%rcx), %xmm8
-; AVX-NEXT:    vmovaps 16(%rdx), %xmm10
-; AVX-NEXT:    vshufps {{.*#+}} xmm11 = xmm10[3,3],xmm8[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm8, %ymm8
-; AVX-NEXT:    vshufps {{.*#+}} ymm10 = ymm3[3,3],ymm1[3,3],ymm3[7,7],ymm1[7,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3,4],ymm8[5,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4,5,6],ymm9[7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4],ymm3[5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm4[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4,5,6],ymm3[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm2[3,3],ymm0[3,3],ymm2[7,7],ymm0[7,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4],ymm6[5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm11[2],ymm6[3,4,5,6],ymm11[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
 ; AVX-NEXT:    vmovaps %ymm0, 64(%r9)
-; AVX-NEXT:    vmovaps %ymm8, 128(%r9)
-; AVX-NEXT:    vmovaps %ymm6, 32(%r9)
-; AVX-NEXT:    vmovaps %ymm7, 96(%r9)
-; AVX-NEXT:    vmovaps %ymm5, (%r9)
+; AVX-NEXT:    vmovaps %ymm6, 128(%r9)
+; AVX-NEXT:    vmovaps %ymm5, 32(%r9)
+; AVX-NEXT:    vmovaps %ymm10, 96(%r9)
+; AVX-NEXT:    vmovaps %ymm4, (%r9)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -1329,128 +1327,117 @@ define void @store_i32_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-LABEL: store_i32_stride5_vf16:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovaps (%rdi), %ymm0
-; AVX-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX-NEXT:    vmovaps 32(%rsi), %ymm3
-; AVX-NEXT:    vmovaps 32(%rdx), %ymm2
-; AVX-NEXT:    vmovaps (%rsi), %xmm6
-; AVX-NEXT:    vmovaps 32(%rsi), %xmm8
-; AVX-NEXT:    vmovaps (%rdi), %xmm10
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm9
-; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm10[1],xmm6[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = xmm10[0],xmm6[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX-NEXT:    vmovaps 32(%rdi), %ymm2
+; AVX-NEXT:    vmovaps (%rsi), %ymm1
+; AVX-NEXT:    vmovaps 32(%rdx), %ymm3
+; AVX-NEXT:    vmovaps (%rsi), %xmm8
+; AVX-NEXT:    vmovaps (%rdi), %xmm5
+; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm5[1],xmm8[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm6 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm6, %ymm4
 ; AVX-NEXT:    vmovaps (%rdx), %xmm11
-; AVX-NEXT:    vmovaps (%rcx), %xmm12
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm12[0],xmm11[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 4(%rdx), %xmm13
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm7, %ymm7
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3],ymm4[4,5,6],ymm7[7]
-; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm5, %ymm5
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1,2,3],ymm5[4],ymm4[5,6,7]
-; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,xmm9[1],xmm8[1],zero
-; AVX-NEXT:    vmovaps 32(%rdx), %xmm13
-; AVX-NEXT:    vmovaps 32(%rcx), %xmm14
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm14[0],xmm13[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; AVX-NEXT:    vmovaps (%rcx), %xmm14
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm14[0],xmm11[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 4(%rdx), %xmm7
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5,6],ymm6[7]
+; AVX-NEXT:    vmovaps 32(%rsi), %xmm9
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm7
+; AVX-NEXT:    vinsertps {{.*#+}} xmm6 = zero,xmm7[1],xmm9[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm10 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm10, %ymm10
+; AVX-NEXT:    vmovaps 32(%rdx), %xmm12
+; AVX-NEXT:    vmovaps 32(%rcx), %xmm13
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm13[0],xmm12[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[0,1,2,0]
 ; AVX-NEXT:    vbroadcastss 36(%rdx), %xmm15
-; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm7, %ymm7
-; AVX-NEXT:    vinsertps {{.*#+}} xmm15 = xmm9[0],xmm8[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm15, %ymm5
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm7[2,3],ymm5[4,5,6],ymm7[7]
-; AVX-NEXT:    vmovaps 32(%rcx), %ymm7
-; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm15, %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm15[0],ymm5[1,2,3],ymm15[4],ymm5[5,6,7]
+; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm6, %ymm15
+; AVX-NEXT:    vmovaps 32(%rcx), %ymm6
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm15[2,3],ymm10[4,5,6],ymm15[7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm15 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm15[4],ymm4[5,6,7]
 ; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm11 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
-; AVX-NEXT:    vbroadcastss 4(%rcx), %xmm12
-; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
-; AVX-NEXT:    vinsertps {{.*#+}} xmm6 = zero,zero,xmm10[2],xmm6[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm6, %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm11[0,1],ymm6[2,3],ymm11[4,5],ymm6[6,7]
-; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = mem[0,1,0,1]
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0],ymm10[1],ymm6[2,3,4,5],ymm10[6],ymm6[7]
-; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 48(%rsi), %xmm10
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm11 = xmm11[2],xmm14[2],xmm11[3],xmm14[3]
+; AVX-NEXT:    vbroadcastss 4(%rcx), %xmm14
+; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm14, %ymm11
+; AVX-NEXT:    vinsertps {{.*#+}} xmm8 = zero,zero,xmm5[2],xmm8[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm8, %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm11[0,1],ymm5[2,3],ymm11[4,5],ymm5[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm15[1],ymm5[2,3,4,5],ymm15[6],ymm5[7]
+; AVX-NEXT:    vmovaps 48(%rsi), %xmm8
 ; AVX-NEXT:    vmovaps 48(%rdi), %xmm11
-; AVX-NEXT:    vinsertps {{.*#+}} xmm12 = zero,zero,xmm11[2],xmm10[2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm11[1,1],xmm10[1,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm10, %ymm10
-; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm7[1,1],ymm2[1,1],ymm7[5,5],ymm2[5,5]
-; AVX-NEXT:    vmovaps %ymm2, %ymm6
+; AVX-NEXT:    vinsertps {{.*#+}} xmm14 = zero,zero,xmm11[2],xmm8[2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm11[1,1],xmm8[1,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm8, %ymm8
+; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm6[1,1],ymm3[1,1],ymm6[5,5],ymm3[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3,4],ymm10[5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm11[3,4],ymm8[5,6,7]
 ; AVX-NEXT:    vmovaps 48(%rcx), %xmm11
-; AVX-NEXT:    vmovaps 48(%rdx), %xmm12
-; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm12[3,3],xmm11[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm11 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX-NEXT:    vmovaps 32(%r8), %ymm12
+; AVX-NEXT:    vmovaps 48(%rdx), %xmm14
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm14[3,3],xmm11[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm11 = xmm14[2],xmm11[2],xmm14[3],xmm11[3]
+; AVX-NEXT:    vmovaps 32(%rsi), %ymm14
 ; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm11, %ymm11
-; AVX-NEXT:    vshufps {{.*#+}} ymm15 = ymm3[3,3],ymm1[3,3],ymm3[7,7],ymm1[7,7]
-; AVX-NEXT:    vmovaps %ymm1, %ymm2
+; AVX-NEXT:    vshufps {{.*#+}} ymm15 = ymm14[3,3],ymm2[3,3],ymm14[7,7],ymm2[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm15[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm15[3,4],ymm11[5,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm12[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm15[0],ymm10[1,2,3,4],ymm15[5],ymm10[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm15 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm15[0],ymm8[1,2,3,4],ymm15[5],ymm8[6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm15[2],ymm11[3,4,5,6],ymm15[7]
-; AVX-NEXT:    vmovaps (%rsi), %ymm15
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm13 = xmm13[2],xmm14[2],xmm13[3],xmm14[3]
-; AVX-NEXT:    vbroadcastss 36(%rcx), %xmm14
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm14, %ymm13
-; AVX-NEXT:    vinsertps {{.*#+}} xmm8 = zero,zero,xmm9[2],xmm8[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2,3],ymm13[4,5],ymm8[6,7]
-; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = mem[0,1,0,1]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3,4,5],ymm9[6],ymm8[7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm15 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4],ymm10[5,6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
+; AVX-NEXT:    vbroadcastss 36(%rcx), %xmm13
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
+; AVX-NEXT:    vinsertps {{.*#+}} xmm9 = zero,zero,xmm7[2],xmm9[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm9, %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3],ymm12[4,5],ymm7[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0],ymm15[1],ymm7[2,3,4,5],ymm15[6],ymm7[7]
 ; AVX-NEXT:    vmovaps 16(%rcx), %xmm9
-; AVX-NEXT:    vmovaps 16(%rdx), %xmm13
-; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm13[3,3],xmm9[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm9 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
-; AVX-NEXT:    vmovaps 16(%rsi), %xmm13
-; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm9, %ymm9
-; AVX-NEXT:    vmovaps %ymm0, %ymm5
-; AVX-NEXT:    vshufps {{.*#+}} ymm14 = ymm15[3,3],ymm0[3,3],ymm15[7,7],ymm0[7,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm14[3,4],ymm9[5,6,7]
-; AVX-NEXT:    vmovaps 16(%rdi), %xmm14
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,xmm14[2],xmm13[2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm14[1,1],xmm13[1,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm13, %ymm0
+; AVX-NEXT:    vmovaps 16(%rdx), %xmm12
+; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm12[3,3],xmm9[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm9 = xmm12[2],xmm9[2],xmm12[3],xmm9[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm9, %ymm9
+; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm1[3,3],ymm0[3,3],ymm1[7,7],ymm0[7,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm12[3,4],ymm9[5,6,7]
+; AVX-NEXT:    vmovaps 16(%rsi), %xmm12
+; AVX-NEXT:    vmovaps 16(%rdi), %xmm13
+; AVX-NEXT:    vinsertps {{.*#+}} xmm15 = zero,zero,xmm13[2],xmm12[2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm13[1,1],xmm12[1,1]
 ; AVX-NEXT:    vmovaps (%rdx), %ymm13
-; AVX-NEXT:    vmovaps (%rcx), %ymm14
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm14[1,1],ymm13[1,1],ymm14[5,5],ymm13[5,5]
+; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm12, %ymm12
+; AVX-NEXT:    vmovaps (%rcx), %ymm15
+; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm15[1,1],ymm13[1,1],ymm15[5,5],ymm13[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm4[3,4],ymm0[5,6,7]
-; AVX-NEXT:    vmovaps (%r8), %ymm4
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm4[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm0[2],ymm9[3,4,5,6],ymm0[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm3[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4],ymm1[5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm7[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm6[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4,5,6],ymm3[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm12[3],ymm1[4,5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm15[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4],ymm2[5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm14[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm7 = ymm13[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0,1],ymm3[2],ymm7[3,4,5,6],ymm3[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm12[0,1,2],ymm4[3,4],ymm12[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm12[2],ymm9[3,4,5,6],ymm12[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm12[0],ymm4[1,2,3,4],ymm12[5],ymm4[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm14[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm12[0,1,2,3],ymm2[4],ymm12[5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4,5,6],ymm6[7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3],ymm2[4,5],ymm3[6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5,6,7]
-; AVX-NEXT:    vmovaps %ymm2, 64(%r9)
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm15[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm13[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4,5,6],ymm1[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],mem[3],ymm2[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vmovaps %ymm0, 64(%r9)
 ; AVX-NEXT:    vmovaps %ymm1, 224(%r9)
-; AVX-NEXT:    vmovaps %ymm0, 96(%r9)
+; AVX-NEXT:    vmovaps %ymm4, 96(%r9)
 ; AVX-NEXT:    vmovaps %ymm9, 128(%r9)
-; AVX-NEXT:    vmovaps %ymm8, 192(%r9)
+; AVX-NEXT:    vmovaps %ymm7, 192(%r9)
 ; AVX-NEXT:    vmovaps %ymm11, 288(%r9)
-; AVX-NEXT:    vmovaps %ymm10, 256(%r9)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 32(%r9)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 160(%r9)
+; AVX-NEXT:    vmovaps %ymm8, 256(%r9)
+; AVX-NEXT:    vmovaps %ymm5, 32(%r9)
+; AVX-NEXT:    vmovaps %ymm10, 160(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, (%r9)
 ; AVX-NEXT:    vzeroupper
@@ -2770,156 +2757,152 @@ define void @store_i32_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride5_vf32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    subq $600, %rsp # imm = 0x258
-; AVX-NEXT:    vmovaps (%rsi), %xmm6
-; AVX-NEXT:    vmovaps 32(%rsi), %xmm3
-; AVX-NEXT:    vmovaps (%rdi), %xmm9
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm9[1],xmm6[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm9[0],xmm6[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vmovaps (%rdx), %xmm14
-; AVX-NEXT:    vmovaps (%rcx), %xmm15
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm15[0],xmm14[0]
+; AVX-NEXT:    subq $504, %rsp # imm = 0x1F8
+; AVX-NEXT:    vmovaps (%rsi), %xmm14
+; AVX-NEXT:    vmovaps (%rdi), %xmm12
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm12[1],xmm14[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm12[0],xmm14[0],xmm12[1],xmm14[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX-NEXT:    vmovaps (%rdx), %xmm11
+; AVX-NEXT:    vmovaps (%rcx), %xmm0
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm11[0]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 4(%rdx), %xmm5
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5,6],ymm2[7]
-; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm1, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm4[1],xmm3[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm4[0],xmm3[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vmovaps 32(%rdx), %xmm2
-; AVX-NEXT:    vmovaps 32(%rcx), %xmm5
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm5[0],xmm2[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 36(%rdx), %xmm8
-; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm7, %ymm7
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm7[2,3],ymm0[4,5,6],ymm7[7]
-; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm1, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
+; AVX-NEXT:    vbroadcastss 4(%rdx), %xmm3
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6],ymm2[7]
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 32(%rsi), %xmm3
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm2
+; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = zero,xmm2[1],xmm3[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
+; AVX-NEXT:    vmovaps 32(%rdx), %xmm7
+; AVX-NEXT:    vmovaps 32(%rcx), %xmm6
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm7[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 36(%rdx), %xmm5
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3],ymm1[4,5,6],ymm4[7]
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 64(%rsi), %xmm4
+; AVX-NEXT:    vmovaps 64(%rdi), %xmm1
+; AVX-NEXT:    vinsertps {{.*#+}} xmm8 = zero,xmm1[1],xmm4[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm9 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm9, %ymm8
+; AVX-NEXT:    vmovaps 64(%rdx), %xmm5
+; AVX-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 64(%rcx), %xmm10
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm10[0],xmm5[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm9[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 68(%rdx), %xmm5
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm9, %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm8[0,1],ymm5[2,3],ymm8[4,5,6],ymm5[7]
+; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 96(%rsi), %xmm9
+; AVX-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 96(%rdi), %xmm8
+; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,xmm8[1],xmm9[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm15 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm15, %ymm5
+; AVX-NEXT:    vmovaps 96(%rdx), %xmm9
+; AVX-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 96(%rcx), %xmm13
+; AVX-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm15 = xmm13[0],xmm9[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm15[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 100(%rdx), %xmm13
+; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm15, %ymm13
+; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm5[0,1],ymm13[2,3],ymm5[4,5,6],ymm13[7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm11[2],xmm0[2],xmm11[3],xmm0[3]
+; AVX-NEXT:    vbroadcastss 4(%rcx), %xmm5
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm5, %ymm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,zero,xmm12[2],xmm14[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm5, %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5],ymm5[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm5 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm9 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm9 = mem[0,1,2,3],ymm5[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4,5],ymm5[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rsi), %xmm1
-; AVX-NEXT:    vmovaps 64(%rdi), %xmm0
-; AVX-NEXT:    vmovaps 64(%rdx), %xmm7
-; AVX-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 64(%rcx), %xmm8
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm8[0],xmm7[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 68(%rdx), %xmm10
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm7, %ymm7
-; AVX-NEXT:    vinsertps {{.*#+}} xmm10 = zero,xmm0[1],xmm1[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm11 = xmm0[0],xmm1[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm11, %ymm10
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3],ymm10[4,5,6],ymm7[7]
-; AVX-NEXT:    vinsertf128 $1, 64(%r8), %ymm11, %ymm10
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1,2,3],ymm10[4],ymm7[5,6,7]
-; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 96(%rdx), %xmm10
-; AVX-NEXT:    vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 96(%rcx), %xmm7
-; AVX-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm7[0],xmm10[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 100(%rdx), %xmm10
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm7, %ymm7
-; AVX-NEXT:    vmovaps 96(%rsi), %xmm13
-; AVX-NEXT:    vmovaps 96(%rdi), %xmm12
-; AVX-NEXT:    vinsertps {{.*#+}} xmm10 = zero,xmm12[1],xmm13[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm11 = xmm12[0],xmm13[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm11, %ymm10
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3],ymm10[4,5,6],ymm7[7]
-; AVX-NEXT:    vinsertf128 $1, 96(%r8), %ymm11, %ymm10
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0],ymm7[1,2,3],ymm10[4],ymm7[5,6,7]
-; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm7 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
-; AVX-NEXT:    vbroadcastss 4(%rcx), %xmm10
-; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm10, %ymm7
-; AVX-NEXT:    vinsertps {{.*#+}} xmm6 = zero,zero,xmm9[2],xmm6[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm6, %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
-; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = mem[0,1,0,1]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
-; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 16(%rsi), %xmm6
-; AVX-NEXT:    vmovaps 16(%rdi), %xmm7
-; AVX-NEXT:    vinsertps {{.*#+}} xmm9 = zero,zero,xmm7[2],xmm6[2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm7[1,1],xmm6[1,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm6, %ymm6
-; AVX-NEXT:    vmovaps (%rdx), %ymm15
-; AVX-NEXT:    vmovaps (%rcx), %ymm7
-; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[1,1],ymm15[1,1],ymm7[5,5],ymm15[5,5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4],ymm6[5,6,7]
-; AVX-NEXT:    vmovaps 16(%rcx), %xmm7
-; AVX-NEXT:    vmovaps 16(%rdx), %xmm9
-; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm9[3,3],xmm7[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm7 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm7, %ymm7
-; AVX-NEXT:    vmovaps (%rdi), %ymm9
+; AVX-NEXT:    vmovaps 16(%rsi), %xmm0
+; AVX-NEXT:    vmovaps 16(%rdi), %xmm5
+; AVX-NEXT:    vinsertps {{.*#+}} xmm11 = zero,zero,xmm5[2],xmm0[2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm5[1,1],xmm0[1,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm0
+; AVX-NEXT:    vmovaps (%rdx), %ymm5
+; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps (%rcx), %ymm9
 ; AVX-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rsi), %ymm10
-; AVX-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm9 = ymm10[3,3],ymm9[3,3],ymm10[7,7],ymm9[7,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm9[3,4],ymm7[5,6,7]
-; AVX-NEXT:    vmovaps (%r8), %ymm9
+; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm9[1,1],ymm5[1,1],ymm9[5,5],ymm5[5,5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4],ymm0[5,6,7]
+; AVX-NEXT:    vmovaps 16(%rcx), %xmm5
+; AVX-NEXT:    vmovaps 16(%rdx), %xmm11
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm11[3,3],xmm5[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm5, %ymm5
+; AVX-NEXT:    vmovaps (%rdi), %ymm9
 ; AVX-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0],ymm6[1,2,3,4],ymm9[5],ymm6[6,7]
-; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm9[2],ymm7[3,4,5,6],ymm9[7]
-; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX-NEXT:    vmovaps (%rsi), %ymm11
+; AVX-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[3,3],ymm9[3,3],ymm11[7,7],ymm9[7,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm11[3,4],ymm5[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm11 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm11[0],ymm0[1,2,3,4],ymm11[5],ymm0[6,7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm11[2],ymm5[3,4,5,6],ymm11[7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
 ; AVX-NEXT:    vbroadcastss 36(%rcx), %xmm5
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = zero,zero,xmm4[2],xmm3[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
-; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5],ymm3[6],ymm2[7]
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 48(%rsi), %xmm2
-; AVX-NEXT:    vmovaps 48(%rdi), %xmm3
-; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm2[2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm3[1,1],xmm2[1,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX-NEXT:    vmovaps 32(%rdx), %ymm3
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm5, %ymm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = zero,zero,xmm2[2],xmm3[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm3 = mem[0,1,2,3],ymm2[4],mem[5,6,7]
 ; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 32(%rcx), %ymm4
-; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm4[1,1],ymm3[1,1],ymm4[5,5],ymm3[5,5]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 48(%rsi), %xmm0
+; AVX-NEXT:    vmovaps 48(%rdi), %xmm2
+; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = zero,zero,xmm2[2],xmm0[2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[1,1],xmm0[1,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX-NEXT:    vmovaps 32(%rdx), %ymm2
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 32(%rcx), %ymm14
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm14[1,1],ymm2[1,1],ymm14[5,5],ymm2[5,5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4],ymm0[5,6,7]
+; AVX-NEXT:    vmovaps 48(%rcx), %xmm2
+; AVX-NEXT:    vmovaps 48(%rdx), %xmm3
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm3[3,3],xmm2[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX-NEXT:    vmovaps 32(%rdi), %ymm12
+; AVX-NEXT:    vmovaps 32(%rsi), %ymm11
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm11[3,3],ymm12[3,3],ymm11[7,7],ymm12[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4],ymm2[5,6,7]
-; AVX-NEXT:    vmovaps 48(%rcx), %xmm3
-; AVX-NEXT:    vmovaps 48(%rdx), %xmm4
-; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm3[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm3
-; AVX-NEXT:    vmovaps 32(%rdi), %ymm4
-; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 32(%rsi), %ymm14
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm14[3,3],ymm4[3,3],ymm14[7,7],ymm4[7,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4],ymm3[5,6,7]
-; AVX-NEXT:    vmovaps 32(%r8), %ymm4
-; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3,4],ymm4[5],ymm2[6,7]
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm4[2],ymm3[3,4,5,6],ymm4[7]
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
-; AVX-NEXT:    vbroadcastss 68(%rcx), %xmm3
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm0[2],xmm1[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4],ymm3[5],ymm0[6,7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm3[2],ymm2[3,4,5,6],ymm3[7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3]
+; AVX-NEXT:    vbroadcastss 68(%rcx), %xmm2
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm1[2],xmm4[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[0,1,2,3],ymm1[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 80(%rsi), %xmm0
@@ -2931,44 +2914,46 @@ define void @store_i32_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps 64(%rcx), %ymm9
 ; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm9[1,1],ymm10[1,1],ymm9[5,5],ymm10[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX-NEXT:    vmovaps 80(%rcx), %xmm0
-; AVX-NEXT:    vmovaps 80(%rdx), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm1
-; AVX-NEXT:    vmovaps 64(%rdi), %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
+; AVX-NEXT:    vmovaps 80(%rcx), %xmm1
+; AVX-NEXT:    vmovaps 80(%rdx), %xmm2
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,3],xmm1[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm3
+; AVX-NEXT:    vmovaps 64(%rdi), %ymm6
 ; AVX-NEXT:    vmovaps 64(%rsi), %ymm5
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm5[3,3],ymm8[3,3],ymm5[7,7],ymm8[7,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm5[3,3],ymm6[3,3],ymm5[7,7],ymm6[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0,1,2],ymm4[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps 64(%r8), %ymm6
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm6[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm11[0],ymm3[1,2,3,4],ymm11[5],ymm3[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4],ymm3[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1,2,3,4],ymm4[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm11[2],ymm4[3,4,5,6],ymm11[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm4[2],ymm3[3,4,5,6],ymm4[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm3 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX-NEXT:    vbroadcastss 100(%rcx), %xmm4
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm12[2],xmm13[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm4, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
-; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = mem[0,1,0,1]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm4[1],ymm3[2,3,4,5],ymm4[6],ymm3[7]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX-NEXT:    vbroadcastss 100(%rcx), %xmm3
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm3 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm3 = zero,zero,xmm8[2],mem[0]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm3, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm3[4],ymm13[5,6,7]
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4,5],ymm3[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 112(%rsi), %xmm3
-; AVX-NEXT:    vmovaps 112(%rdi), %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm12 = zero,zero,xmm4[2],xmm3[2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm4[1,1],xmm3[1,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm3, %ymm3
-; AVX-NEXT:    vmovaps 96(%rdx), %ymm12
-; AVX-NEXT:    vmovaps 96(%rcx), %ymm13
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm13[1,1],ymm12[1,1],ymm13[5,5],ymm12[5,5]
+; AVX-NEXT:    vmovaps 112(%rsi), %xmm0
+; AVX-NEXT:    vmovaps 112(%rdi), %xmm3
+; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm0[2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm0[1,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm3
+; AVX-NEXT:    vmovaps 96(%rdx), %ymm13
+; AVX-NEXT:    vmovaps 96(%rcx), %ymm2
+; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm2[1,1],ymm13[1,1],ymm2[5,5],ymm13[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm3[0,1,2],ymm4[3,4],ymm3[5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4],ymm3[5,6,7]
 ; AVX-NEXT:    vmovaps 112(%rcx), %xmm4
 ; AVX-NEXT:    vmovaps 112(%rdx), %xmm0
 ; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[3,3],xmm4[3,3]
@@ -2976,52 +2961,48 @@ define void @store_i32_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmovaps 96(%rdi), %ymm1
 ; AVX-NEXT:    vmovaps 96(%rsi), %ymm4
-; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm4[3,3],ymm1[3,3],ymm4[7,7],ymm1[7,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4],ymm0[5,6,7]
-; AVX-NEXT:    vmovaps 96(%r8), %ymm2
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm3[0],ymm7[1,2,3,4],ymm3[5],ymm7[6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1],ymm3[2],ymm11[3,4,5,6],ymm3[7]
-; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm11 = mem[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4],ymm11[5,6,7]
-; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm0 = mem[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm15 = ymm15[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2],ymm15[3,4,5,6],ymm0[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm11[0],ymm0[1,2,3],ymm11[4,5],ymm0[6,7]
-; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm14[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4],ymm11[5,6,7]
-; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm14 = mem[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm15 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2],ymm15[3,4,5,6],ymm14[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0],ymm14[1,2,3],ymm11[4,5],ymm14[6,7]
-; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm11 = ymm11[0,1,2],mem[3],ymm11[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm15 = ymm4[3,3],ymm1[3,3],ymm4[7,7],ymm1[7,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0,1,2],ymm15[3,4],ymm0[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm0[0],ymm3[1,2,3,4],ymm0[5],ymm3[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm15[0,1],ymm0[2],ymm15[3,4,5,6],ymm0[7]
+; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = mem[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4],ymm0[5,6,7]
+; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm15 = mem[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm8 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm15[2],ymm8[3,4,5,6],ymm15[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1,2,3],ymm0[4,5],ymm8[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm11[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm12[4],ymm8[5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm14[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm12 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2],ymm12[3,4,5,6],ymm11[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0],ymm11[1,2,3],ymm8[4,5],ymm11[6,7]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm5[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4],ymm5[5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm9[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4],ymm5[5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm9[0,1,3,0,4,5,7,4]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm9 = ymm10[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2],ymm9[3,4,5,6],ymm8[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm8[1,2,3],ymm5[4,5],ymm8[6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2],ymm9[3,4,5,6],ymm6[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1,2,3],ymm5[4,5],ymm6[6,7]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[3,0,2,3,7,4,6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4],ymm4[5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm13[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm12[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2],ymm6[3,4,5,6],ymm4[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1,2,3],ymm1[4,5],ymm4[6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm13[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4,5,6],ymm2[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm8[0,1,2],mem[3],ymm8[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2],mem[3],ymm5[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],mem[3],ymm1[4,5,6,7]
 ; AVX-NEXT:    vmovaps %ymm1, 544(%r9)
-; AVX-NEXT:    vmovaps %ymm5, 384(%r9)
-; AVX-NEXT:    vmovaps %ymm11, 224(%r9)
+; AVX-NEXT:    vmovaps %ymm4, 384(%r9)
+; AVX-NEXT:    vmovaps %ymm2, 224(%r9)
 ; AVX-NEXT:    vmovaps %ymm0, 64(%r9)
 ; AVX-NEXT:    vmovaps %ymm3, 608(%r9)
 ; AVX-NEXT:    vmovaps %ymm7, 576(%r9)
@@ -3053,7 +3034,7 @@ define void @store_i32_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps %ymm0, 160(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, (%r9)
-; AVX-NEXT:    addq $600, %rsp # imm = 0x258
+; AVX-NEXT:    addq $504, %rsp # imm = 0x1F8
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -5587,279 +5568,269 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride5_vf64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    subq $1784, %rsp # imm = 0x6F8
-; AVX-NEXT:    vmovaps (%rsi), %xmm6
-; AVX-NEXT:    vmovaps 32(%rsi), %xmm14
-; AVX-NEXT:    vmovaps (%rdi), %xmm5
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm15
-; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,xmm5[1],xmm6[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm5[0],xmm6[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX-NEXT:    vmovaps (%rdx), %xmm0
-; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps (%rcx), %xmm11
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm11[0],xmm0[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 4(%rdx), %xmm7
-; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm4, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3],ymm2[4,5,6],ymm4[7]
-; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm3, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm2[1,2,3],ymm3[4],ymm2[5,6,7]
+; AVX-NEXT:    subq $1576, %rsp # imm = 0x628
+; AVX-NEXT:    vmovaps (%rsi), %xmm2
+; AVX-NEXT:    vmovaps (%rdi), %xmm9
+; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = zero,xmm9[1],xmm2[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm9[0],xmm2[0],xmm9[1],xmm2[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX-NEXT:    vmovaps (%rdx), %xmm6
+; AVX-NEXT:    vmovaps (%rcx), %xmm5
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm5[0],xmm6[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 4(%rdx), %xmm4
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6],ymm3[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,xmm15[1],xmm14[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = xmm15[0],xmm14[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX-NEXT:    vmovaps 32(%rsi), %xmm3
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm1
+; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm1[1],xmm3[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
 ; AVX-NEXT:    vmovaps 32(%rdx), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 32(%rcx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm1[0],xmm0[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 36(%rdx), %xmm7
-; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm4, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3],ymm2[4,5,6],ymm4[7]
-; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm3, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm2[1,2,3],ymm3[4],ymm2[5,6,7]
+; AVX-NEXT:    vmovaps 32(%rcx), %xmm7
+; AVX-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm7[0],xmm0[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 36(%rdx), %xmm8
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm7, %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm7[2,3],ymm4[4,5,6],ymm7[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rsi), %xmm13
-; AVX-NEXT:    vmovaps 64(%rdi), %xmm10
+; AVX-NEXT:    vmovaps 64(%rsi), %xmm15
+; AVX-NEXT:    vmovaps 64(%rdi), %xmm14
+; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm14[1],xmm15[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
 ; AVX-NEXT:    vmovaps 64(%rdx), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 64(%rcx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 68(%rdx), %xmm3
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm10[1],xmm13[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = xmm10[0],xmm13[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6],ymm2[7]
-; AVX-NEXT:    vinsertf128 $1, 64(%r8), %ymm4, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0],ymm2[1,2,3],ymm3[4],ymm2[5,6,7]
+; AVX-NEXT:    vmovaps 64(%rcx), %xmm7
+; AVX-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm7[0],xmm0[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 68(%rdx), %xmm8
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm7, %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm7[2,3],ymm4[4,5,6],ymm7[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 96(%rsi), %xmm10
+; AVX-NEXT:    vmovaps 96(%rdi), %xmm4
+; AVX-NEXT:    vinsertps {{.*#+}} xmm7 = zero,xmm4[1],xmm10[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm8 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm8, %ymm7
 ; AVX-NEXT:    vmovaps 96(%rdx), %xmm0
-; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT:    vmovaps 96(%rcx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 100(%rdx), %xmm3
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm3
-; AVX-NEXT:    vmovaps 96(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 96(%rdi), %xmm7
-; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm7[1],xmm0[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm9 = xmm7[0],xmm0[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm9, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5,6],ymm3[7]
-; AVX-NEXT:    vinsertf128 $1, 96(%r8), %ymm9, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0],ymm3[1,2,3],ymm4[4],ymm3[5,6,7]
+; AVX-NEXT:    vmovaps 96(%rcx), %xmm8
+; AVX-NEXT:    vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm8[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 100(%rdx), %xmm12
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm8, %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6],ymm8[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 128(%rdx), %xmm0
-; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 128(%rcx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm1[0],xmm0[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 132(%rdx), %xmm4
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm4
 ; AVX-NEXT:    vmovaps 128(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 128(%rdi), %xmm8
-; AVX-NEXT:    vinsertps {{.*#+}} xmm9 = zero,xmm8[1],xmm0[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm12 = xmm8[0],xmm0[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm12, %ymm9
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2,3],ymm9[4,5,6],ymm4[7]
-; AVX-NEXT:    vinsertf128 $1, 128(%r8), %ymm12, %ymm9
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0],ymm4[1,2,3],ymm9[4],ymm4[5,6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 160(%rdx), %xmm0
+; AVX-NEXT:    vmovaps 128(%rdi), %xmm7
+; AVX-NEXT:    vinsertps {{.*#+}} xmm8 = zero,xmm7[1],xmm0[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm12 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm12, %ymm8
+; AVX-NEXT:    vmovaps 128(%rdx), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 160(%rcx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm1[0],xmm0[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 164(%rdx), %xmm9
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm4, %ymm9
+; AVX-NEXT:    vmovaps 128(%rcx), %xmm11
+; AVX-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm11[0],xmm0[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 132(%rdx), %xmm13
+; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm12, %ymm12
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm12[2,3],ymm8[4,5,6],ymm12[7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 160(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 160(%rdi), %xmm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm12 = zero,xmm4[1],xmm0[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm4[0],xmm0[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0,1],ymm9[2,3],ymm12[4,5,6],ymm9[7]
-; AVX-NEXT:    vinsertf128 $1, 160(%r8), %ymm0, %ymm0
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm9[1,2,3],ymm0[4],ymm9[5,6,7]
+; AVX-NEXT:    vmovaps 160(%rdi), %xmm8
+; AVX-NEXT:    vinsertps {{.*#+}} xmm12 = zero,xmm8[1],xmm0[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm13 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
+; AVX-NEXT:    vmovaps 160(%rdx), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 160(%rcx), %xmm11
+; AVX-NEXT:    vmovaps %xmm11, (%rsp) # 16-byte Spill
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm13 = xmm11[0],xmm0[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm13[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 164(%rdx), %xmm11
+; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm13, %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm11[2,3],ymm12[4,5,6],ymm11[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 192(%rdx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 192(%rcx), %xmm0
+; AVX-NEXT:    vmovaps 192(%rsi), %xmm12
+; AVX-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 192(%rdi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 196(%rdx), %xmm9
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm1
-; AVX-NEXT:    vmovaps 192(%rsi), %xmm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm11 = zero,xmm0[1],xmm12[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm12 = xmm0[0],xmm12[0],xmm0[1],xmm12[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX-NEXT:    vmovaps 192(%rdx), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 192(%rdi), %xmm9
-; AVX-NEXT:    vinsertps {{.*#+}} xmm12 = zero,xmm9[1],xmm0[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm9[0],xmm0[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm1[2,3],ymm12[4,5,6],ymm1[7]
-; AVX-NEXT:    vinsertf128 $1, 192(%r8), %ymm0, %ymm0
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX-NEXT:    vmovaps 192(%rcx), %xmm12
+; AVX-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm12[0],xmm0[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 196(%rdx), %xmm13
+; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm12, %ymm12
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6],ymm12[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 224(%rdx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 224(%rcx), %xmm0
+; AVX-NEXT:    vmovaps 224(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; AVX-NEXT:    vbroadcastss 228(%rdx), %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
-; AVX-NEXT:    vmovaps 224(%rsi), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 224(%rdi), %xmm0
+; AVX-NEXT:    vmovaps 224(%rdi), %xmm12
+; AVX-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vinsertps {{.*#+}} xmm11 = zero,xmm12[1],xmm0[1],zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX-NEXT:    vmovaps 224(%rdx), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,xmm0[1],xmm1[1],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm12 = xmm0[0],xmm1[0],zero,zero
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm12, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6],ymm3[7]
-; AVX-NEXT:    vinsertf128 $1, 224(%r8), %ymm12, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
+; AVX-NEXT:    vmovaps 224(%rcx), %xmm12
+; AVX-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm12[0],xmm0[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[0,1,2,0]
+; AVX-NEXT:    vbroadcastss 228(%rdx), %xmm13
+; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm12, %ymm12
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6],ymm12[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3]
-; AVX-NEXT:    vbroadcastss 4(%rcx), %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm5[2],xmm6[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm5 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX-NEXT:    vbroadcastss 4(%rcx), %xmm6
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm9[2],xmm2[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm2, %ymm0
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm5 = mem[0,1,2,3],ymm2[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 16(%rsi), %xmm0
-; AVX-NEXT:    vmovaps 16(%rdi), %xmm1
-; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,zero,xmm1[2],xmm0[2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,1],xmm0[1,1]
+; AVX-NEXT:    vmovaps 16(%rdi), %xmm2
+; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,zero,xmm2[2],xmm0[2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[1,1],xmm0[1,1]
 ; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps (%rdx), %ymm11
-; AVX-NEXT:    vmovaps (%rcx), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm11[1,1],ymm1[5,5],ymm11[5,5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX-NEXT:    vmovaps 16(%rcx), %xmm1
-; AVX-NEXT:    vmovaps 16(%rdx), %xmm5
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm5[3,3],xmm1[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
-; AVX-NEXT:    vmovaps (%rdi), %ymm2
+; AVX-NEXT:    vmovaps (%rdx), %ymm2
 ; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rsi), %ymm3
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
+; AVX-NEXT:    vmovaps (%rcx), %ymm5
+; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm5[1,1],ymm2[1,1],ymm5[5,5],ymm2[5,5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4],ymm0[5,6,7]
+; AVX-NEXT:    vmovaps 16(%rcx), %xmm2
+; AVX-NEXT:    vmovaps 16(%rdx), %xmm5
+; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm5[3,3],xmm2[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm2, %ymm2
+; AVX-NEXT:    vmovaps (%rdi), %ymm5
+; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps (%rsi), %ymm6
+; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm6[3,3],ymm5[3,3],ymm6[7,7],ymm5[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps (%r8), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm2[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3,4],ymm2[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm5 = mem[0,1,0,1]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1,2,3,4],ymm5[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm5[2],ymm1[3,4,5,6],ymm5[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm5[2],ymm2[3,4,5,6],ymm5[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX-NEXT:    vbroadcastss 36(%rcx), %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm15[2],xmm14[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm1, %ymm1
+; AVX-NEXT:    vbroadcastss 36(%rcx), %xmm2
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm1[2],xmm3[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[0,1,2,3],ymm1[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 48(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 48(%rdi), %xmm1
-; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,zero,xmm1[2],xmm0[2]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm1[2],xmm0[2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,1],xmm0[1,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 32(%rdx), %ymm14
-; AVX-NEXT:    vmovaps 32(%rcx), %ymm15
-; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm15[1,1],ymm14[1,1],ymm15[5,5],ymm14[5,5]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vmovaps 32(%rdx), %ymm1
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 32(%rcx), %ymm2
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX-NEXT:    vmovaps 48(%rcx), %xmm1
-; AVX-NEXT:    vmovaps 48(%rdx), %xmm5
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm5[3,3],xmm1[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; AVX-NEXT:    vmovaps 48(%rdx), %xmm2
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,3],xmm1[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX-NEXT:    vmovaps 32(%rdi), %ymm2
 ; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 32(%rsi), %ymm3
 ; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps 32(%r8), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm2[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1,2,3,4],ymm5[5],ymm0[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm5[2],ymm1[3,4,5,6],ymm5[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX-NEXT:    vbroadcastss 68(%rcx), %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm10[2],xmm13[2]
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm1, %ymm1
+; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm14[2],xmm15[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[0,1,2,3],ymm1[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 80(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps 80(%rdi), %xmm1
-; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,zero,xmm1[2],xmm0[2]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm1[2],xmm0[2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,1],xmm0[1,1]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 64(%rdx), %ymm13
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vmovaps 64(%rdx), %ymm14
 ; AVX-NEXT:    vmovaps 64(%rcx), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm13[1,1],ymm1[5,5],ymm13[5,5]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm14[1,1],ymm1[5,5],ymm14[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX-NEXT:    vmovaps 80(%rcx), %xmm1
-; AVX-NEXT:    vmovaps 80(%rdx), %xmm5
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm5[3,3],xmm1[3,3]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; AVX-NEXT:    vmovaps 80(%rdx), %xmm2
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,3],xmm1[3,3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX-NEXT:    vmovaps 64(%rdi), %ymm2
 ; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 64(%rsi), %ymm3
 ; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps 64(%r8), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm2[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0],ymm0[1,2,3,4],ymm5[5],ymm0[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm5[2],ymm1[3,4,5,6],ymm5[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX-NEXT:    vbroadcastss 100(%rcx), %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm1 = zero,zero,xmm7[2],mem[0]
-; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm1, %ymm1
+; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = zero,zero,xmm4[2],xmm10[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[0,1,2,3],ymm1[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 112(%rsi), %xmm0
@@ -5876,21 +5847,18 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX-NEXT:    vmovaps 112(%rcx), %xmm1
 ; AVX-NEXT:    vmovaps 112(%rdx), %xmm2
-; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm2[3,3],xmm1[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,3],xmm1[3,3]
 ; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
-; AVX-NEXT:    vmovaps 96(%rdi), %ymm3
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 96(%rsi), %ymm2
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-NEXT:    vmovaps 96(%rdi), %ymm2
 ; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[3,3],ymm3[3,3],ymm2[7,7],ymm3[7,7]
+; AVX-NEXT:    vmovaps 96(%rsi), %ymm13
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm13[3,3],ymm2[3,3],ymm13[7,7],ymm2[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps 96(%r8), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -5898,11 +5866,14 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX-NEXT:    vbroadcastss 132(%rcx), %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm1 = zero,zero,xmm8[2],mem[0]
-; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm1, %ymm1
+; AVX-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm1 = zero,zero,xmm7[2],mem[0]
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[0,1,2,3],ymm1[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 144(%rsi), %xmm0
@@ -5922,30 +5893,30 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,3],xmm1[3,3]
 ; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX-NEXT:    vmovaps 128(%rdi), %ymm3
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 128(%rdi), %ymm12
 ; AVX-NEXT:    vmovaps 128(%rsi), %ymm2
 ; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[3,3],ymm3[3,3],ymm2[7,7],ymm3[7,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[3,3],ymm12[3,3],ymm2[7,7],ymm12[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps 128(%r8), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    vunpckhps (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX-NEXT:    vbroadcastss 164(%rcx), %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm1 = zero,zero,xmm4[2],mem[0]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm1 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm1 = zero,zero,xmm8[2],mem[0]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[0,1,2,3],ymm1[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 176(%rsi), %xmm0
@@ -5953,11 +5924,11 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm1[2],xmm0[2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,1],xmm0[1,1]
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 160(%rdx), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 160(%rcx), %ymm1
+; AVX-NEXT:    vmovaps 160(%rdx), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm2[1,1],ymm1[5,5],ymm2[5,5]
+; AVX-NEXT:    vmovaps 160(%rcx), %ymm2
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX-NEXT:    vmovaps 176(%rcx), %xmm1
@@ -5965,17 +5936,15 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,3],xmm1[3,3]
 ; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX-NEXT:    vmovaps 160(%rdi), %ymm12
-; AVX-NEXT:    vmovaps 160(%rsi), %ymm2
+; AVX-NEXT:    vmovaps 160(%rdi), %ymm2
 ; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[3,3],ymm12[3,3],ymm2[7,7],ymm12[7,7]
+; AVX-NEXT:    vmovaps 160(%rsi), %ymm11
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm11[3,3],ymm2[3,3],ymm11[7,7],ymm2[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps 160(%r8), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -5983,11 +5952,15 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX-NEXT:    vbroadcastss 196(%rcx), %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm1 = zero,zero,xmm9[2],mem[0]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm1, %ymm1
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX-NEXT:    vinsertps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm1 = zero,zero,xmm2[2],mem[0]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[0,1,2,3],ymm1[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 208(%rsi), %xmm0
@@ -5995,11 +5968,9 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm1[2],xmm0[2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,1],xmm0[1,1]
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 192(%rdx), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 192(%rcx), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[1,1],ymm2[5,5],ymm1[5,5]
+; AVX-NEXT:    vmovaps 192(%rdx), %ymm10
+; AVX-NEXT:    vmovaps 192(%rcx), %ymm9
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm9[1,1],ymm10[1,1],ymm9[5,5],ymm10[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX-NEXT:    vmovaps 208(%rcx), %xmm1
@@ -6008,12 +5979,11 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX-NEXT:    vmovaps 192(%rdi), %ymm8
-; AVX-NEXT:    vmovaps 192(%rsi), %ymm9
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm9[3,3],ymm8[3,3],ymm9[7,7],ymm8[7,7]
+; AVX-NEXT:    vmovaps 192(%rsi), %ymm6
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm6[3,3],ymm8[3,3],ymm6[7,7],ymm8[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps 192(%r8), %ymm10
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm10[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7]
@@ -6029,6 +5999,9 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps $239, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[0,1,2,3],ymm1[4],mem[5,6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 240(%rsi), %xmm0
@@ -6036,73 +6009,56 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = zero,zero,xmm1[2],xmm0[2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,1],xmm0[1,1]
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 224(%rdx), %ymm6
-; AVX-NEXT:    vmovaps 224(%rcx), %ymm4
-; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm4[1,1],ymm6[1,1],ymm4[5,5],ymm6[5,5]
+; AVX-NEXT:    vmovaps 224(%rdx), %ymm4
+; AVX-NEXT:    vmovaps 224(%rcx), %ymm3
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm3[1,1],ymm4[1,1],ymm3[5,5],ymm4[5,5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX-NEXT:    vmovaps 240(%rcx), %xmm0
 ; AVX-NEXT:    vmovaps 240(%rdx), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[3,3],xmm0[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm1[3,3],xmm0[3,3]
 ; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm1
-; AVX-NEXT:    vmovaps 224(%rdi), %ymm2
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT:    vmovaps 224(%rdi), %ymm1
 ; AVX-NEXT:    vmovaps 224(%rsi), %ymm0
-; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[3,3],ymm2[3,3],ymm0[7,7],ymm2[7,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm1[0,1,2],ymm5[3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps 224(%r8), %ymm1
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm1[2,3,2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0],ymm3[1,2,3,4],ymm7[5],ymm3[6,7]
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm7[2],ymm5[3,4,5,6],ymm7[7]
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = mem[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4],ymm3[5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm0[3,3],ymm1[3,3],ymm0[7,7],ymm1[7,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3,4],ymm5[5,6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = mem[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0],ymm2[1,2,3,4],ymm7[5],ymm2[6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm7[2],ymm5[3,4,5,6],ymm7[7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = ymm2[0,1,2,3],mem[4],ymm2[5,6,7]
 ; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX-NEXT:    # ymm5 = mem[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm7 = ymm11[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2],ymm7[3,4,5,6],ymm5[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1,2,3],ymm3[4,5],ymm5[6,7]
-; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm11 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
-; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = mem[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4],ymm3[5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm15[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm7 = ymm14[1,0,2,2]
+; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm7 = mem[1,0,2,2]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2],ymm7[3,4,5,6],ymm5[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1,2,3],ymm3[4,5],ymm5[6,7]
-; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm7 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
-; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = mem[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4],ymm3[5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0],ymm5[1,2,3],ymm2[4,5],ymm5[6,7]
+; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = ymm2[0,1,2,3],mem[4],ymm2[5,6,7]
 ; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX-NEXT:    # ymm5 = mem[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm15 = ymm13[1,0,2,2]
+; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm15 = mem[1,0,2,2]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm15[0,1],ymm5[2],ymm15[3,4,5,6],ymm5[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1,2,3],ymm3[4,5],ymm5[6,7]
-; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm5 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
-; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = mem[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4],ymm3[5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm2[0],ymm5[1,2,3],ymm2[4,5],ymm5[6,7]
+; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = mem[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = ymm2[0,1,2,3],mem[4],ymm2[5,6,7]
 ; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
 ; AVX-NEXT:    # ymm15 = mem[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm14 = mem[1,0,2,2]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm14 = ymm14[1,0,2,2]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4,5,6],ymm15[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm14[1,2,3],ymm3[4,5],ymm14[6,7]
-; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
-; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm14 = mem[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm14[1,2,3],ymm2[4,5],ymm14[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm14 = ymm13[3,0,2,3,7,4,6,7]
 ; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
 ; AVX-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4],ymm14[5,6,7]
 ; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
@@ -6111,8 +6067,6 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    # ymm13 = mem[1,0,2,2]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2],ymm13[3,4,5,6],ymm15[7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0],ymm13[1,2,3],ymm14[4,5],ymm13[6,7]
-; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm13 = ymm13[0,1,2],mem[3],ymm13[4,5,6,7]
 ; AVX-NEXT:    vpermilps $227, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX-NEXT:    # ymm14 = mem[3,0,2,3,7,4,6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm12[4],ymm14[5,6,7]
@@ -6122,32 +6076,43 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    # ymm12 = mem[1,0,2,2]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm15[2],ymm12[3,4,5,6],ymm15[7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm14[0],ymm12[1,2,3],ymm14[4,5],ymm12[6,7]
-; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm12 = ymm12[0,1,2],mem[3],ymm12[4,5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm9 = ymm9[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4],ymm9[5,6,7]
-; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm9 = mem[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm14 = mem[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm14[0,1],ymm9[2],ymm14[3,4,5,6],ymm9[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0],ymm9[1,2,3],ymm8[4,5],ymm9[6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3],ymm8[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4],ymm11[5,6,7]
+; AVX-NEXT:    vpermilps $52, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm14 = mem[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm15 = mem[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2],ymm15[3,4,5,6],ymm14[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0],ymm14[1,2,3],ymm11[4,5],ymm14[6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[3,0,2,3,7,4,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4],ymm6[5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm9[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm9 = ymm10[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2],ymm9[3,4,5,6],ymm8[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm8[1,2,3],ymm6[4,5],ymm8[6,7]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[3,0,2,3,7,4,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4],ymm0[5,6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm4[0,1,3,0,4,5,7,4]
-; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm6[1,0,2,2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4,5,6],ymm2[7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm3[0,1,3,0,4,5,7,4]
+; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm4[1,0,2,2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4,5,6],ymm1[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0,1,2],mem[3],ymm7[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2],mem[3],ymm5[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],mem[3],ymm2[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm13[0,1,2],mem[3],ymm13[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1,2],mem[3],ymm12[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm11[0,1,2],mem[3],ymm11[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],mem[3],ymm6[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
 ; AVX-NEXT:    vmovaps %ymm0, 1184(%r9)
-; AVX-NEXT:    vmovaps %ymm8, 1024(%r9)
-; AVX-NEXT:    vmovaps %ymm12, 864(%r9)
-; AVX-NEXT:    vmovaps %ymm13, 704(%r9)
-; AVX-NEXT:    vmovaps %ymm3, 544(%r9)
-; AVX-NEXT:    vmovaps %ymm5, 384(%r9)
-; AVX-NEXT:    vmovaps %ymm7, 224(%r9)
-; AVX-NEXT:    vmovaps %ymm11, 64(%r9)
+; AVX-NEXT:    vmovaps %ymm6, 1024(%r9)
+; AVX-NEXT:    vmovaps %ymm7, 864(%r9)
+; AVX-NEXT:    vmovaps %ymm5, 704(%r9)
+; AVX-NEXT:    vmovaps %ymm4, 544(%r9)
+; AVX-NEXT:    vmovaps %ymm2, 384(%r9)
+; AVX-NEXT:    vmovaps %ymm3, 224(%r9)
+; AVX-NEXT:    vmovaps %ymm1, 64(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 1248(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -6162,7 +6127,7 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps %ymm0, 992(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 928(%r9)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 896(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 832(%r9)
@@ -6174,7 +6139,7 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps %ymm0, 672(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 608(%r9)
-; AVX-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 576(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 512(%r9)
@@ -6212,7 +6177,7 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps %ymm0, 160(%r9)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, (%r9)
-; AVX-NEXT:    addq $1784, %rsp # imm = 0x6F8
+; AVX-NEXT:    addq $1576, %rsp # imm = 0x628
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index 7cac6e8e1a5f8..7fe0bcc0f3d8d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -764,77 +764,76 @@ define void @store_i32_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX-LABEL: store_i32_stride6_vf8:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT:    vmovaps (%rdi), %ymm7
-; AVX-NEXT:    vmovaps (%rsi), %ymm8
-; AVX-NEXT:    vmovaps (%rdx), %ymm2
-; AVX-NEXT:    vmovaps (%rcx), %ymm3
-; AVX-NEXT:    vmovaps (%r8), %ymm1
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[4],ymm8[4],ymm7[5],ymm8[5]
+; AVX-NEXT:    vmovaps (%rdi), %ymm3
+; AVX-NEXT:    vmovaps (%rsi), %ymm4
+; AVX-NEXT:    vmovaps (%rdx), %ymm1
+; AVX-NEXT:    vmovaps (%rcx), %ymm2
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 16(%r9), %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
-; AVX-NEXT:    vmovaps (%rcx), %xmm9
-; AVX-NEXT:    vmovaps (%rdx), %xmm10
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm10[1,2],xmm9[1,2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
-; AVX-NEXT:    vmovaps (%rsi), %xmm5
-; AVX-NEXT:    vmovaps (%rdi), %xmm6
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm11 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm12[4,5],ymm4[6,7]
-; AVX-NEXT:    vbroadcastss 4(%r8), %xmm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3],ymm4[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 4(%r9), %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm12[3],ymm4[4,5,6,7]
-; AVX-NEXT:    vunpckhps {{.*#+}} ymm8 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm2[1,2],ymm3[1,2],ymm2[5,6],ymm3[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5],ymm7[6,7]
-; AVX-NEXT:    vbroadcastss 20(%r8), %xmm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm12[2,3],ymm7[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 20(%r9), %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm12[3],ymm7[4,5,6,7]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm9 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm11, %ymm9
-; AVX-NEXT:    vpermilps {{.*#+}} xmm10 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm10, %ymm10
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3,4,5],ymm10[6,7]
-; AVX-NEXT:    vmovaps (%r9), %xmm10
-; AVX-NEXT:    vshufps {{.*#+}} xmm11 = xmm10[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm11, %ymm10
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4,5,6],ymm10[7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm3[3,0],ymm2[3,0],ymm3[7,4],ymm2[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5],ymm1[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3,2,3]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX-NEXT:    vextractf128 $1, %ymm5, %xmm5
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastss 16(%r9), %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5],ymm0[6,7]
+; AVX-NEXT:    vmovaps (%rcx), %xmm6
+; AVX-NEXT:    vmovaps (%rdx), %xmm7
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm7[1,2],xmm6[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm5, %ymm5
+; AVX-NEXT:    vmovaps (%rsi), %xmm8
+; AVX-NEXT:    vmovaps (%rdi), %xmm9
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm10 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm11[4,5],ymm5[6,7]
+; AVX-NEXT:    vbroadcastss 4(%r8), %xmm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm11[2,3],ymm5[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 4(%r9), %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm11[3],ymm5[4,5,6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm10, %ymm6
+; AVX-NEXT:    vpermilps {{.*#+}} xmm7 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm7, %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3,4,5],ymm7[6,7]
+; AVX-NEXT:    vmovaps (%r9), %xmm7
+; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm7[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm10, %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5,6],ymm7[7]
+; AVX-NEXT:    vbroadcastss (%rcx), %xmm7
+; AVX-NEXT:    vbroadcastss (%rdx), %xmm10
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm9
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,3],ymm9[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm8, %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3],ymm8[4,5],ymm7[6,7]
+; AVX-NEXT:    vbroadcastss (%r9), %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5],ymm7[6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} ymm3 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm1[1,2],ymm2[1,2],ymm1[5,6],ymm2[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
+; AVX-NEXT:    vbroadcastss 20(%r8), %xmm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm8[2,3],ymm4[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 20(%r9), %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm8[3],ymm4[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[3,0],ymm1[3,0],ymm2[7,4],ymm1[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5],ymm2[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6],ymm2[7]
-; AVX-NEXT:    vbroadcastss (%rcx), %xmm2
-; AVX-NEXT:    vbroadcastss (%rdx), %xmm3
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm5
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm3, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; AVX-NEXT:    vbroadcastss (%r9), %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
-; AVX-NEXT:    vmovaps %ymm2, (%rax)
 ; AVX-NEXT:    vmovaps %ymm1, 160(%rax)
-; AVX-NEXT:    vmovaps %ymm9, 64(%rax)
-; AVX-NEXT:    vmovaps %ymm7, 128(%rax)
-; AVX-NEXT:    vmovaps %ymm4, 32(%rax)
+; AVX-NEXT:    vmovaps %ymm4, 128(%rax)
+; AVX-NEXT:    vmovaps %ymm7, (%rax)
+; AVX-NEXT:    vmovaps %ymm6, 64(%rax)
+; AVX-NEXT:    vmovaps %ymm5, 32(%rax)
 ; AVX-NEXT:    vmovaps %ymm0, 96(%rax)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
@@ -1477,171 +1476,165 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride6_vf16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    subq $104, %rsp
-; AVX-NEXT:    vmovaps 32(%rdi), %ymm5
+; AVX-NEXT:    subq $40, %rsp
+; AVX-NEXT:    vmovaps 32(%rdi), %ymm11
 ; AVX-NEXT:    vmovaps 32(%rsi), %ymm13
-; AVX-NEXT:    vmovaps 32(%rdx), %ymm7
+; AVX-NEXT:    vmovaps 32(%rdx), %ymm8
 ; AVX-NEXT:    vmovaps 32(%rcx), %ymm9
-; AVX-NEXT:    vmovaps 32(%r8), %ymm11
-; AVX-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rcx), %xmm8
-; AVX-NEXT:    vmovaps 32(%rcx), %xmm3
-; AVX-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps (%rdx), %xmm6
-; AVX-NEXT:    vmovaps 32(%rdx), %xmm10
-; AVX-NEXT:    vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm6[1,2],xmm8[1,2]
+; AVX-NEXT:    vmovaps (%rcx), %xmm4
+; AVX-NEXT:    vmovaps 32(%rcx), %xmm5
+; AVX-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps (%rdx), %xmm15
+; AVX-NEXT:    vmovaps 32(%rdx), %xmm6
+; AVX-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm15[1,2],xmm4[1,2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX-NEXT:    vmovaps (%rsi), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    vmovaps (%rdi), %xmm2
 ; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm1
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm3 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX-NEXT:    vbroadcastss 4(%r8), %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
 ; AVX-NEXT:    vbroadcastss 4(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[4],ymm13[4],ymm5[5],ymm13[5]
+; AVX-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm11[0],ymm13[0],ymm11[1],ymm13[1],ymm11[4],ymm13[4],ymm11[5],ymm13[5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm9[0],ymm7[0],ymm9[2],ymm7[2]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm9[0],ymm8[0],ymm9[2],ymm8[2]
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX-NEXT:    vbroadcastss 48(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm10[1,2],xmm3[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm6[1,2],xmm5[1,2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 32(%rsi), %xmm3
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm2
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm15 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm11
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 36(%r8), %xmm11
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm11[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 36(%r9), %ymm11
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm11[3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rdi), %ymm1
-; AVX-NEXT:    vmovaps (%rsi), %ymm0
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm11 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm11[2,3,2,3]
-; AVX-NEXT:    vmovaps (%rdx), %ymm11
-; AVX-NEXT:    vmovaps (%rcx), %ymm12
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
-; AVX-NEXT:    vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm10[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm14[0,1],ymm10[2,3],ymm14[4,5,6,7]
-; AVX-NEXT:    vmovaps (%r8), %ymm14
-; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm14[4,5],ymm10[6,7]
-; AVX-NEXT:    vbroadcastss 16(%r9), %ymm14
-; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm10[0,1,2,3,4],ymm14[5],ymm10[6,7]
-; AVX-NEXT:    vunpckhps {{.*#+}} ymm13 = ymm5[2],ymm13[2],ymm5[3],ymm13[3],ymm5[6],ymm13[6],ymm5[7],ymm13[7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm7[1,2],ymm9[1,2],ymm7[5,6],ymm9[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm5[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm13[4,5],ymm5[6,7]
-; AVX-NEXT:    vbroadcastss 52(%r8), %xmm10
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm5
+; AVX-NEXT:    vmovaps 32(%rsi), %xmm2
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm1
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm6 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm10
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm10[4,5],ymm5[6,7]
+; AVX-NEXT:    vbroadcastss 36(%r8), %xmm10
 ; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 52(%r9), %ymm10
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm5[0,1,2],ymm10[3],ymm5[4,5,6,7]
-; AVX-NEXT:    vunpckhps {{.*#+}} ymm5 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm11[1,2],ymm12[1,2],ymm11[5,6],ymm12[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 20(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 20(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX-NEXT:    vbroadcastss 36(%r9), %ymm10
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2],ymm10[3],ymm5[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps (%rdi), %ymm14
+; AVX-NEXT:    vmovaps (%rsi), %ymm5
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm10 = ymm14[0],ymm5[0],ymm14[1],ymm5[1],ymm14[4],ymm5[4],ymm14[5],ymm5[5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm10[2,3,2,3]
+; AVX-NEXT:    vmovaps (%rdx), %ymm10
+; AVX-NEXT:    vmovaps (%rcx), %ymm12
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm12[0],ymm10[0],ymm12[2],ymm10[2]
+; AVX-NEXT:    vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm7[2,3],ymm0[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastss 16(%r9), %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3,4],ymm7[5],ymm0[6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm15[2],xmm4[2],xmm15[3],xmm4[3]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
-; AVX-NEXT:    vpermilps {{.*#+}} xmm4 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
-; AVX-NEXT:    vmovaps (%r9), %xmm4
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm4[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm6, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm0[0],ymm4[1],ymm0[2,3,4,5,6],ymm4[7]
-; AVX-NEXT:    vbroadcastss 32(%rcx), %xmm0
-; AVX-NEXT:    vbroadcastss 32(%rdx), %xmm6
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm2, %ymm2
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 32(%r9), %ymm2
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm9[3,0],ymm7[3,0],ymm9[7,4],ymm7[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4,5,6],ymm3[7]
+; AVX-NEXT:    vmovaps (%r9), %xmm3
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0],ymm3[1],ymm0[2,3,4,5,6],ymm3[7]
+; AVX-NEXT:    vbroadcastss 32(%rcx), %xmm0
+; AVX-NEXT:    vbroadcastss 32(%rdx), %xmm3
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm1, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastss 32(%r9), %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm11[2],ymm13[2],ymm11[3],ymm13[3],ymm11[6],ymm13[6],ymm11[7],ymm13[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm8[1,2],ymm9[1,2],ymm8[5,6],ymm9[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 52(%r8), %xmm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 52(%r9), %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm1[0,1,2],ymm3[3],ymm1[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm6, %ymm0
+; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vmovaps 32(%r9), %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
+; AVX-NEXT:    vbroadcastss (%rcx), %xmm1
+; AVX-NEXT:    vbroadcastss (%rdx), %xmm3
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm15, %ymm3
-; AVX-NEXT:    vpermilps {{.*#+}} xmm6 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm6, %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3,4,5],ymm6[6,7]
-; AVX-NEXT:    vmovaps 32(%r9), %xmm6
-; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm6[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3,4,5,6],ymm6[7]
-; AVX-NEXT:    vbroadcastss (%rcx), %xmm6
-; AVX-NEXT:    vbroadcastss (%rdx), %xmm7
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm8
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm7, %ymm7
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
-; AVX-NEXT:    vbroadcastss (%r9), %ymm7
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm12[3,0],ymm11[3,0],ymm12[7,4],ymm11[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3],ymm7[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm7 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3,4,5],ymm7[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm7[1],ymm5[2,3,4,5,6],ymm7[7]
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm4
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm3, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3],ymm3[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss (%r9), %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5],ymm1[6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} ymm3 = ymm14[2],ymm5[2],ymm14[3],ymm5[3],ymm14[6],ymm5[6],ymm14[7],ymm5[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm10[1,2],ymm12[1,2],ymm10[5,6],ymm12[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
+; AVX-NEXT:    vbroadcastss 20(%r8), %xmm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 20(%r9), %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm12[3,0],ymm10[3,0],ymm12[7,4],ymm10[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3],ymm5[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm5 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm5[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5],ymm5[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm5 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm5[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4,5,6],ymm5[7]
 ; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT:    vmovaps %ymm5, 160(%rax)
-; AVX-NEXT:    vmovaps %ymm6, (%rax)
-; AVX-NEXT:    vmovaps %ymm3, 256(%rax)
-; AVX-NEXT:    vmovaps %ymm0, 352(%rax)
+; AVX-NEXT:    vmovaps %ymm3, 160(%rax)
+; AVX-NEXT:    vmovaps %ymm4, 128(%rax)
+; AVX-NEXT:    vmovaps %ymm1, (%rax)
+; AVX-NEXT:    vmovaps %ymm0, 256(%rax)
+; AVX-NEXT:    vmovaps %ymm8, 352(%rax)
+; AVX-NEXT:    vmovaps %ymm11, 320(%rax)
 ; AVX-NEXT:    vmovaps %ymm2, 192(%rax)
-; AVX-NEXT:    vmovaps %ymm4, 64(%rax)
-; AVX-NEXT:    vmovaps %ymm1, 128(%rax)
-; AVX-NEXT:    vmovaps %ymm10, 320(%rax)
-; AVX-NEXT:    vmovaps %ymm14, 96(%rax)
-; AVX-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm15, 64(%rax)
+; AVX-NEXT:    vmovaps %ymm7, 96(%rax)
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 224(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 288(%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX-NEXT:    addq $104, %rsp
+; AVX-NEXT:    addq $40, %rsp
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -3224,57 +3217,50 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride6_vf32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    subq $1032, %rsp # imm = 0x408
-; AVX-NEXT:    vmovaps (%rdi), %ymm12
-; AVX-NEXT:    vmovaps (%rsi), %ymm8
-; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rdx), %ymm4
-; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rcx), %ymm6
-; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%r8), %ymm5
-; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rcx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    subq $712, %rsp # imm = 0x2C8
+; AVX-NEXT:    vmovaps (%rdi), %ymm7
+; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps (%rsi), %ymm10
+; AVX-NEXT:    vmovups %ymm10, (%rsp) # 32-byte Spill
+; AVX-NEXT:    vmovaps (%rdx), %ymm8
+; AVX-NEXT:    vmovaps (%rcx), %ymm4
+; AVX-NEXT:    vmovaps (%rcx), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    vmovaps 32(%rcx), %xmm2
 ; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps (%rdx), %xmm0
-; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 32(%rdx), %xmm3
-; AVX-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
+; AVX-NEXT:    vmovaps (%rdx), %xmm14
+; AVX-NEXT:    vmovaps 32(%rdx), %xmm6
+; AVX-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm14[1,2],xmm0[1,2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps (%rsi), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps (%rdi), %xmm7
-; AVX-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX-NEXT:    vmovaps (%rsi), %xmm5
+; AVX-NEXT:    vmovaps (%rdi), %xmm3
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm9 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX-NEXT:    vbroadcastss 4(%r8), %xmm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
 ; AVX-NEXT:    vbroadcastss 4(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm12[0],ymm8[0],ymm12[1],ymm8[1],ymm12[4],ymm8[4],ymm12[5],ymm8[5]
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm7[0],ymm10[0],ymm7[1],ymm10[1],ymm7[4],ymm10[4],ymm7[5],ymm10[5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm8[0],ymm4[2],ymm8[2]
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX-NEXT:    vbroadcastss 16(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,2],xmm2[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm6[1,2],xmm2[1,2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX-NEXT:    vmovaps 32(%rsi), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    vmovaps 32(%rdi), %xmm2
-; AVX-NEXT:    vmovaps %xmm2, (%rsp) # 16-byte Spill
+; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -3289,317 +3275,292 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps 32(%rsi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vmovaps 32(%rdx), %ymm8
-; AVX-NEXT:    vmovaps 32(%rcx), %ymm13
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm8[0],ymm13[2],ymm8[2]
-; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovaps 32(%r8), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 48(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[2,3,2,3]
+; AVX-NEXT:    vmovaps 32(%rdx), %ymm2
+; AVX-NEXT:    vmovaps 32(%rcx), %ymm1
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
+; AVX-NEXT:    vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
+; AVX-NEXT:    vbroadcastss 48(%r9), %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rcx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 64(%rdx), %xmm0
+; AVX-NEXT:    vmovaps 64(%rcx), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 64(%rsi), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 64(%rdi), %xmm2
-; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 68(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 68(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vmovaps 64(%rdx), %xmm6
+; AVX-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[1,2],xmm0[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm6, %ymm6
+; AVX-NEXT:    vmovaps 64(%rsi), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 64(%rdi), %xmm7
+; AVX-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm7[2],xmm0[2],xmm7[3],xmm0[3]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rdi), %ymm6
-; AVX-NEXT:    vmovaps 64(%rsi), %ymm14
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm6[0],ymm14[0],ymm6[1],ymm14[1],ymm6[4],ymm14[4],ymm6[5],ymm14[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vmovaps 64(%rdx), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rcx), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovaps 64(%r8), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 80(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
+; AVX-NEXT:    vbroadcastss 68(%r8), %xmm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 68(%r9), %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm7[3],ymm6[4,5,6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 96(%rcx), %xmm9
-; AVX-NEXT:    vmovaps 96(%rdx), %xmm11
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm11[1,2],xmm9[1,2]
-; AVX-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT:    vmovaps 96(%rsi), %xmm5
-; AVX-NEXT:    vmovaps 96(%rdi), %xmm4
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX-NEXT:    vmovaps 64(%rdi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
-; AVX-NEXT:    vbroadcastss 100(%r8), %xmm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 100(%r9), %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6,7]
+; AVX-NEXT:    vmovaps 64(%rsi), %ymm6
+; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm6 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[4],ymm6[4],ymm0[5],ymm6[5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm6[2,3,2,3]
+; AVX-NEXT:    vmovaps 64(%rdx), %ymm7
+; AVX-NEXT:    vmovaps 64(%rcx), %ymm6
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm6[0],ymm7[0],ymm6[2],ymm7[2]
+; AVX-NEXT:    vextractf128 $1, %ymm11, %xmm11
+; AVX-NEXT:    vshufps {{.*#+}} xmm11 = xmm11[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],mem[4,5],ymm10[6,7]
+; AVX-NEXT:    vbroadcastss 80(%r9), %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm11[5],ymm10[6,7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 96(%rcx), %xmm11
+; AVX-NEXT:    vmovaps 96(%rdx), %xmm12
+; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm12[1,2],xmm11[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm10[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm10, %ymm10
+; AVX-NEXT:    vmovaps 96(%rsi), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 96(%rdi), %xmm13
+; AVX-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm13[2],xmm0[2],xmm13[3],xmm0[3]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 96(%rdi), %ymm7
-; AVX-NEXT:    vmovaps 96(%rsi), %ymm3
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm10 = ymm7[0],ymm3[0],ymm7[1],ymm3[1],ymm7[4],ymm3[4],ymm7[5],ymm3[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm10[2,3,2,3]
-; AVX-NEXT:    vmovaps 96(%rdx), %ymm10
-; AVX-NEXT:    vmovaps 96(%rcx), %ymm2
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm13
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm13[4,5],ymm10[6,7]
+; AVX-NEXT:    vbroadcastss 100(%r8), %xmm13
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3],ymm10[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 100(%r9), %ymm13
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2],ymm13[3],ymm10[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 96(%rdi), %ymm0
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 96(%rsi), %ymm10
 ; AVX-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm10 = ymm0[0],ymm10[0],ymm0[1],ymm10[1],ymm0[4],ymm10[4],ymm0[5],ymm10[5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm10[2,3,2,3]
+; AVX-NEXT:    vmovaps 96(%rdx), %ymm13
+; AVX-NEXT:    vmovaps 96(%rcx), %ymm10
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm10[0],ymm13[0],ymm10[2],ymm13[2]
 ; AVX-NEXT:    vextractf128 $1, %ymm15, %xmm15
 ; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm15[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm15[2,3],ymm1[4,5,6,7]
-; AVX-NEXT:    vmovaps 96(%r8), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX-NEXT:    vbroadcastss 112(%r9), %ymm15
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm15[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm1 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,2],ymm12[1,2],ymm0[5,6],ymm12[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 20(%r8), %xmm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 20(%r9), %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm15 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm8[1,2],ymm13[1,2],ymm8[5,6],ymm13[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 52(%r8), %xmm8
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 52(%r9), %ymm8
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm14[2],ymm6[3],ymm14[3],ymm6[6],ymm14[6],ymm6[7],ymm14[7]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT:    vshufps $153, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm0 = ymm14[1,2],mem[1,2],ymm14[5,6],mem[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 84(%r8), %xmm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 84(%r9), %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm9[0,0,0,0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm11[0,0,0,0]
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 96(%r8), %ymm0, %ymm0
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
-; AVX-NEXT:    vbroadcastss 96(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} ymm11 = ymm7[2],ymm3[2],ymm7[3],ymm3[3],ymm7[6],ymm3[6],ymm7[7],ymm3[7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm10[1,2],ymm2[1,2],ymm10[5,6],ymm2[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 116(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 116(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vbroadcastss (%rcx), %xmm0
-; AVX-NEXT:    vbroadcastss (%rdx), %xmm1
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm1, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss (%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX-NEXT:    vbroadcastss (%rdx), %xmm15
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm3, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastss (%r9), %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = xmm14[2],mem[2],xmm14[3],mem[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm9, %ymm0
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,1,3,3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5],ymm3[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7]
 ; AVX-NEXT:    vmovaps (%r9), %xmm3
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4,5,6],ymm3[7]
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = ymm12[3,0],mem[3,0],ymm12[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = mem[2,3],ymm3[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm4 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3,4,5],ymm4[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4,5,6],ymm4[7]
-; AVX-NEXT:    vbroadcastss 32(%rcx), %xmm4
-; AVX-NEXT:    vbroadcastss 32(%rdx), %xmm6
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; AVX-NEXT:    vmovaps (%rsp), %xmm5 # 16-byte Reload
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm6 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm6 = xmm5[0],mem[0],xmm5[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm7
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm7[0,1],ymm4[2,3],ymm7[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm6, %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
-; AVX-NEXT:    vbroadcastss 32(%r9), %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm6[5],ymm4[6,7]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm6 = xmm6[2,3,2,3]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
-; AVX-NEXT:    vpermilps {{.*#+}} xmm7 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm7, %ymm7
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3,4,5],ymm7[6,7]
-; AVX-NEXT:    vmovaps 32(%r9), %xmm7
-; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm7[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm7, %ymm8, %ymm7
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm6[0],ymm7[1],ymm6[2,3,4,5,6],ymm7[7]
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm6 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm6 = ymm13[3,0],mem[3,0],ymm13[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm15[2,3],ymm6[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm8 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3,4,5],ymm8[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm8 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2,3,4,5,6],ymm8[7]
-; AVX-NEXT:    vbroadcastss 64(%rcx), %xmm8
-; AVX-NEXT:    vbroadcastss 64(%rdx), %xmm9
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm9 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm9 = xmm5[0],mem[0],xmm5[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm12[0,1],ymm8[2,3],ymm12[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 64(%r8), %ymm9, %ymm9
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
-; AVX-NEXT:    vbroadcastss 64(%r9), %ymm9
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm9 = xmm9[2],mem[2],xmm9[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm12, %ymm9
-; AVX-NEXT:    vpermilps {{.*#+}} xmm12 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm12, %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0,1],ymm9[2,3,4,5],ymm12[6,7]
-; AVX-NEXT:    vmovaps 64(%r9), %xmm12
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm12[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2,3,4,5,6],ymm12[7]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm5[3,0],ymm14[3,0],ymm5[7,4],ymm14[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm5 = mem[2,3],ymm12[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm12 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1],ymm5[2,3,4,5],ymm12[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3,4,5,6],ymm12[7]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm12 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm12 = xmm10[2],mem[2],xmm10[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[2,3,2,3]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm10, %ymm12
-; AVX-NEXT:    vpermilps {{.*#+}} xmm13 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm13, %ymm13
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
-; AVX-NEXT:    vmovaps 96(%r9), %xmm13
-; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm13[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm14, %ymm13
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4,5,6],ymm13[7]
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm10 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm11[2,3],ymm10[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm10 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm10 = ymm10[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm2[2,3,4,5],ymm10[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm10 = ymm10[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm10[1],ymm2[2,3,4,5,6],ymm10[7]
-; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT:    vmovaps %ymm2, 736(%rax)
-; AVX-NEXT:    vmovaps %ymm12, 640(%rax)
-; AVX-NEXT:    vmovaps %ymm5, 544(%rax)
-; AVX-NEXT:    vmovaps %ymm9, 448(%rax)
-; AVX-NEXT:    vmovaps %ymm8, 384(%rax)
-; AVX-NEXT:    vmovaps %ymm6, 352(%rax)
-; AVX-NEXT:    vmovaps %ymm7, 256(%rax)
-; AVX-NEXT:    vmovaps %ymm4, 192(%rax)
-; AVX-NEXT:    vmovaps %ymm3, 160(%rax)
-; AVX-NEXT:    vmovaps %ymm1, 64(%rax)
-; AVX-NEXT:    vmovaps %ymm0, (%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 704(%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 576(%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 512(%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 320(%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 128(%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 672(%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 608(%rax)
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 480(%rax)
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm3[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3,4,5,6],ymm3[7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 416(%rax)
+; AVX-NEXT:    vunpckhps (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm8[1,2],ymm4[1,2],ymm8[5,6],ymm4[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7]
+; AVX-NEXT:    vbroadcastss 20(%r8), %xmm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3],ymm3[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 20(%r9), %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm3, (%rsp) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm4[3,0],ymm8[3,0],ymm4[7,4],ymm8[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0],ymm3[1],ymm0[2,3,4,5,6],ymm3[7]
+; AVX-NEXT:    vbroadcastss 32(%rcx), %xmm0
+; AVX-NEXT:    vbroadcastss 32(%rdx), %xmm3
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm4
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3],ymm4[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm3, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastss 32(%r9), %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
+; AVX-NEXT:    vpermilps {{.*#+}} xmm4 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3,4,5],ymm4[6,7]
+; AVX-NEXT:    vmovaps 32(%r9), %xmm4
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm4[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm0[0],ymm4[1],ymm0[2,3,4,5,6],ymm4[7]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm2[1,2],ymm1[1,2],ymm2[5,6],ymm1[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm5 = ymm5[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm0[4,5],ymm5[6,7]
+; AVX-NEXT:    vbroadcastss 52(%r8), %xmm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm8[2,3],ymm5[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 52(%r9), %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm8[3],ymm5[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm2[3,0],ymm1[7,4],ymm2[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
+; AVX-NEXT:    vbroadcastss 64(%rcx), %xmm1
+; AVX-NEXT:    vbroadcastss 64(%rdx), %xmm2
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm1[2,3],ymm8[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 64(%r8), %ymm2, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 64(%r9), %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm8, %ymm2
+; AVX-NEXT:    vpermilps {{.*#+}} xmm8 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm8, %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3,4,5],ymm8[6,7]
+; AVX-NEXT:    vmovaps 64(%r9), %xmm8
+; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm8[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm9, %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0],ymm8[1],ymm2[2,3,4,5,6],ymm8[7]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm9 = ymm7[1,2],ymm6[1,2],ymm7[5,6],ymm6[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm9 = ymm9[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm2[4,5],ymm9[6,7]
+; AVX-NEXT:    vbroadcastss 84(%r8), %xmm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm15[2,3],ymm9[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 84(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm15[3],ymm9[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm7[3,0],ymm6[7,4],ymm7[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm6[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm6 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3,4,5],ymm6[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm6 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm2[0],ymm6[1],ymm2[2,3,4,5,6],ymm6[7]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm11[0,0,0,0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm12[0,0,0,0]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm15[0],xmm7[0],xmm15[1],xmm7[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm15[0,1],ymm7[2,3],ymm15[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 96(%r8), %ymm2, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2,3],ymm2[4,5],ymm7[6,7]
+; AVX-NEXT:    vbroadcastss 96(%r9), %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3,4],ymm7[5],ymm2[6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm11, %ymm2
+; AVX-NEXT:    vpermilps {{.*#+}} xmm11 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm11, %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm11[0,1],ymm2[2,3,4,5],ymm11[6,7]
+; AVX-NEXT:    vmovaps 96(%r9), %xmm11
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm11[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm11[1],ymm2[2,3,4,5,6],ymm11[7]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm11 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm13[1,2],ymm10[1,2],ymm13[5,6],ymm10[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm11[4,5],ymm12[6,7]
+; AVX-NEXT:    vbroadcastss 116(%r8), %xmm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm15[2,3],ymm12[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 116(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3],ymm12[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm10 = ymm10[3,0],ymm13[3,0],ymm10[7,4],ymm13[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm11[2,3],ymm10[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm11 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3,4,5],ymm11[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm11 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0],ymm11[1],ymm10[2,3,4,5,6],ymm11[7]
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT:    vmovaps %ymm10, 736(%rax)
+; AVX-NEXT:    vmovaps %ymm12, 704(%rax)
+; AVX-NEXT:    vmovaps %ymm2, 640(%rax)
+; AVX-NEXT:    vmovaps %ymm7, 576(%rax)
+; AVX-NEXT:    vmovaps %ymm6, 544(%rax)
+; AVX-NEXT:    vmovaps %ymm9, 512(%rax)
+; AVX-NEXT:    vmovaps %ymm8, 448(%rax)
+; AVX-NEXT:    vmovaps %ymm1, 384(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 352(%rax)
+; AVX-NEXT:    vmovaps %ymm5, 320(%rax)
+; AVX-NEXT:    vmovaps %ymm4, 256(%rax)
+; AVX-NEXT:    vmovaps %ymm3, 192(%rax)
+; AVX-NEXT:    vmovaps %ymm14, 160(%rax)
+; AVX-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm0, 128(%rax)
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm0, (%rax)
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm0, 672(%rax)
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm0, 608(%rax)
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm0, 480(%rax)
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vmovaps %ymm0, 416(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 288(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -3608,7 +3569,7 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps %ymm0, 96(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX-NEXT:    addq $1032, %rsp # imm = 0x408
+; AVX-NEXT:    addq $712, %rsp # imm = 0x2C8
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -6568,32 +6529,27 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride6_vf64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    subq $2504, %rsp # imm = 0x9C8
-; AVX-NEXT:    vmovaps (%rdi), %ymm8
-; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rsi), %ymm9
-; AVX-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rdx), %ymm4
-; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rcx), %ymm15
-; AVX-NEXT:    vmovaps (%r8), %ymm6
+; AVX-NEXT:    subq $2056, %rsp # imm = 0x808
+; AVX-NEXT:    vmovaps (%rdi), %ymm6
 ; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps (%rsi), %ymm7
+; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps (%rdx), %ymm3
+; AVX-NEXT:    vmovaps (%rcx), %ymm2
 ; AVX-NEXT:    vmovaps (%rcx), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 32(%rcx), %xmm2
-; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 32(%rcx), %xmm4
+; AVX-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    vmovaps (%rdx), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 32(%rdx), %xmm3
-; AVX-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 32(%rdx), %xmm5
+; AVX-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps (%rsi), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps (%rdi), %xmm7
-; AVX-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; AVX-NEXT:    vmovaps (%rsi), %xmm13
+; AVX-NEXT:    vmovaps (%rdi), %xmm12
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
@@ -6602,25 +6558,24 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vbroadcastss 4(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm15[0],ymm4[0],ymm15[2],ymm4[2]
-; AVX-NEXT:    vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX-NEXT:    vbroadcastss 16(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,2],xmm2[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm5[1,2],xmm4[1,2]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX-NEXT:    vmovaps 32(%rsi), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm2
-; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm4
+; AVX-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
@@ -6634,414 +6589,348 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps 32(%rsi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vmovaps 32(%rdx), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,2,3]
+; AVX-NEXT:    vmovaps 32(%rdx), %ymm0
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 32(%rcx), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovaps 32(%r8), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 48(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rcx), %xmm1
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX-NEXT:    vextractf128 $1, %ymm5, %xmm5
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],mem[4,5],ymm4[6,7]
+; AVX-NEXT:    vbroadcastss 48(%r9), %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5],ymm4[6,7]
+; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 64(%rcx), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 64(%rdx), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 64(%rdx), %xmm0
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[1,2],xmm0[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
+; AVX-NEXT:    vmovaps 64(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 64(%rsi), %xmm1
+; AVX-NEXT:    vmovaps 64(%rdi), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 64(%rdi), %xmm2
-; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 68(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 68(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rdi), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rsi), %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
+; AVX-NEXT:    vbroadcastss 68(%r8), %xmm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 68(%r9), %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 64(%rdi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vmovaps 64(%rdx), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 64(%rcx), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovaps 64(%r8), %ymm1
+; AVX-NEXT:    vmovaps 64(%rsi), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 80(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 96(%rcx), %xmm1
-; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 96(%rdx), %xmm0
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm4[2,3,2,3]
+; AVX-NEXT:    vmovaps 64(%rdx), %ymm5
+; AVX-NEXT:    vmovaps 64(%rcx), %ymm4
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT:    vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm7[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
+; AVX-NEXT:    vbroadcastss 80(%r9), %ymm7
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
+; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 96(%rcx), %xmm7
+; AVX-NEXT:    vmovaps 96(%rdx), %xmm6
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm6[1,2],xmm7[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm8[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm8, %ymm8, %ymm8
+; AVX-NEXT:    vmovaps 96(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 96(%rsi), %xmm1
+; AVX-NEXT:    vmovaps 96(%rdi), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 96(%rdi), %xmm2
-; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 100(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 100(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 96(%rdi), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 96(%rsi), %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm9
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
+; AVX-NEXT:    vbroadcastss 100(%r8), %xmm9
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 100(%r9), %ymm9
+; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3],ymm8[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 96(%rdi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vmovaps 96(%rdx), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 96(%rcx), %ymm1
+; AVX-NEXT:    vmovaps 96(%rsi), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovaps 96(%r8), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 112(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 128(%rcx), %xmm1
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm8[2,3,2,3]
+; AVX-NEXT:    vmovaps 96(%rdx), %ymm9
+; AVX-NEXT:    vmovaps 96(%rcx), %ymm8
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm8[0],ymm9[0],ymm8[2],ymm9[2]
+; AVX-NEXT:    vextractf128 $1, %ymm11, %xmm11
+; AVX-NEXT:    vshufps {{.*#+}} xmm11 = xmm11[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],mem[4,5],ymm10[6,7]
+; AVX-NEXT:    vbroadcastss 112(%r9), %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm11[5],ymm10[6,7]
+; AVX-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 128(%rcx), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 128(%rdx), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 128(%rdx), %xmm0
+; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm1[1,2],xmm0[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm10 = xmm10[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm10, %ymm10
+; AVX-NEXT:    vmovaps 128(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 128(%rsi), %xmm1
+; AVX-NEXT:    vmovaps 128(%rdi), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 128(%rdi), %xmm2
-; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 132(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 132(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
+; AVX-NEXT:    vbroadcastss 132(%r8), %xmm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 132(%r9), %ymm11
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 128(%rdi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 128(%rsi), %ymm11
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm11[0],ymm0[1],ymm11[1],ymm0[4],ymm11[4],ymm0[5],ymm11[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vmovaps 128(%rdx), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 128(%rcx), %ymm2
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovaps 128(%r8), %ymm1
+; AVX-NEXT:    vmovaps 128(%rsi), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 144(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm10[2,3,2,3]
+; AVX-NEXT:    vmovaps 128(%rdx), %ymm11
+; AVX-NEXT:    vmovaps 128(%rcx), %ymm10
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
+; AVX-NEXT:    vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm15[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],mem[4,5],ymm14[6,7]
+; AVX-NEXT:    vbroadcastss 144(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5],ymm14[6,7]
+; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 160(%rcx), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    vmovaps 160(%rdx), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,2],xmm0[1,2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 160(%rsi), %xmm7
-; AVX-NEXT:    vmovaps 160(%rdi), %xmm6
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 164(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 164(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm1[1,2],xmm0[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm15[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm15, %ymm15
+; AVX-NEXT:    vmovaps 160(%rsi), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 160(%rdi), %xmm1
+; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 160(%rdi), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 160(%rsi), %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm14
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5],ymm15[6,7]
+; AVX-NEXT:    vbroadcastss 164(%r8), %xmm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 164(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 160(%rdi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vmovaps 160(%rdx), %ymm1
+; AVX-NEXT:    vmovaps 160(%rsi), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 160(%rcx), %ymm8
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
-; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovaps 160(%r8), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 176(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm14 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX-NEXT:    vmovaps 160(%rdx), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 192(%rcx), %xmm1
+; AVX-NEXT:    vmovaps 160(%rcx), %ymm1
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX-NEXT:    vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm15[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],mem[4,5],ymm14[6,7]
+; AVX-NEXT:    vbroadcastss 176(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5],ymm14[6,7]
+; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 192(%rcx), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 192(%rdx), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 192(%rdx), %xmm0
+; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm1[1,2],xmm0[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm14[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm14, %ymm14
+; AVX-NEXT:    vmovaps 192(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 192(%rsi), %xmm3
-; AVX-NEXT:    vmovaps 192(%rdi), %xmm2
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 196(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 196(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vmovaps 192(%rdi), %xmm1
+; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
+; AVX-NEXT:    vbroadcastss 196(%r8), %xmm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 196(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 192(%rdi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 192(%rdi), %ymm1
+; AVX-NEXT:    vmovaps 192(%rsi), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 192(%rsi), %ymm0
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm14 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX-NEXT:    vmovaps 192(%rdx), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vmovaps 192(%rdx), %ymm4
 ; AVX-NEXT:    vmovaps 192(%rcx), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
-; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vmovaps 192(%r8), %ymm1
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 208(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
-; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 224(%rcx), %xmm1
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX-NEXT:    vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm15[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],mem[4,5],ymm14[6,7]
+; AVX-NEXT:    vbroadcastss 208(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5],ymm14[6,7]
+; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 224(%rcx), %xmm0
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps 224(%rdx), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 224(%rdx), %xmm0
+; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm1[1,2],xmm0[1,2]
+; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm14[0,2,1,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm14, %ymm14
+; AVX-NEXT:    vmovaps 224(%rsi), %xmm0
 ; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[1,2]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovaps 224(%rsi), %xmm1
+; AVX-NEXT:    vmovaps 224(%rdi), %xmm1
 ; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 224(%rdi), %xmm5
-; AVX-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 228(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 228(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
+; AVX-NEXT:    vbroadcastss 228(%r8), %xmm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 228(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 224(%rdi), %ymm0
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps 224(%rdi), %ymm9
-; AVX-NEXT:    vmovaps 224(%rsi), %ymm0
-; AVX-NEXT:    vunpcklps {{.*#+}} ymm14 = ymm9[0],ymm0[0],ymm9[1],ymm0[1],ymm9[4],ymm0[4],ymm9[5],ymm0[5]
+; AVX-NEXT:    vmovaps 224(%rsi), %ymm1
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm14 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX-NEXT:    vmovaps 224(%rdx), %ymm5
-; AVX-NEXT:    vmovaps 224(%rcx), %ymm1
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm13 = ymm1[0],ymm5[0],ymm1[2],ymm5[2]
+; AVX-NEXT:    vmovaps 224(%rdx), %ymm1
 ; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm13[0,1,2,0]
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5,6,7]
-; AVX-NEXT:    vmovaps 224(%r8), %ymm10
-; AVX-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm10[4,5],ymm13[6,7]
-; AVX-NEXT:    vbroadcastss 240(%r9), %ymm14
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1,2,3,4],ymm14[5],ymm13[6,7]
-; AVX-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm14 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} ymm13 = ymm10[1,2],ymm15[1,2],ymm10[5,6],ymm15[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm13 = ymm13[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5],ymm13[6,7]
-; AVX-NEXT:    vbroadcastss 20(%r8), %xmm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3],ymm13[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 20(%r9), %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1,2],ymm15[3],ymm13[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm13 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
-; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT:    vshufps $153, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm15 = ymm12[1,2],mem[1,2],ymm12[5,6],mem[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm15 = ymm15[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm13[4,5],ymm15[6,7]
-; AVX-NEXT:    vbroadcastss 52(%r8), %xmm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm15[0,1],ymm12[2,3],ymm15[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 52(%r9), %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3],ymm12[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm13 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
-; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT:    vshufps $153, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm12 = ymm12[1,2],mem[1,2],ymm12[5,6],mem[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
-; AVX-NEXT:    vbroadcastss 84(%r8), %xmm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm15[2,3],ymm12[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 84(%r9), %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3],ymm12[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm12 = xmm12[0],mem[0],xmm12[1],mem[1]
-; AVX-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm15 = mem[0,0,0,0]
-; AVX-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm13 = mem[0,0,0,0]
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3],ymm15[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 96(%r8), %ymm12, %ymm12
+; AVX-NEXT:    vmovaps 224(%rcx), %ymm0
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT:    vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT:    vshufps {{.*#+}} xmm15 = xmm15[0,1,2,0]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],mem[4,5],ymm14[6,7]
+; AVX-NEXT:    vbroadcastss 240(%r9), %ymm15
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5],ymm14[6,7]
+; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vbroadcastss (%rcx), %xmm14
+; AVX-NEXT:    vbroadcastss (%rdx), %xmm15
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm13
+; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2,3],ymm13[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm12, %ymm12
 ; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5],ymm13[6,7]
-; AVX-NEXT:    vbroadcastss 96(%r9), %ymm13
+; AVX-NEXT:    vbroadcastss (%r9), %ymm13
 ; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4],ymm13[5],ymm12[6,7]
 ; AVX-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm12 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT:    vshufps $153, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm13 = ymm13[1,2],mem[1,2],ymm13[5,6],mem[5,6]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm12 = xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[2,3,2,3]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm12
+; AVX-NEXT:    vpermilps {{.*#+}} xmm13 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm13, %ymm13
+; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
+; AVX-NEXT:    vmovaps (%r9), %xmm13
+; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm13[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm14, %ymm13
+; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4,5,6],ymm13[7]
+; AVX-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm12 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm13 = ymm3[1,2],ymm2[1,2],ymm3[5,6],ymm2[5,6]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm13[2,3,2,3]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm13 = ymm13[0,2,1,3,4,6,5,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm12[4,5],ymm13[6,7]
-; AVX-NEXT:    vbroadcastss 116(%r8), %xmm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3],ymm13[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 116(%r9), %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm15[3],ymm13[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vbroadcastss 20(%r8), %xmm14
+; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2,3],ymm13[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 20(%r9), %ymm14
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm13[0,1,2],ymm14[3],ymm13[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[3,0],ymm3[3,0],ymm2[7,4],ymm3[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5],ymm3[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6],ymm3[7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vbroadcastss 32(%rcx), %xmm2
+; AVX-NEXT:    vbroadcastss 32(%rdx), %xmm3
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm3 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm12
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm12[0,1],ymm2[2,3],ymm12[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm3, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX-NEXT:    vbroadcastss 32(%r9), %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm2 = xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5],ymm3[6,7]
+; AVX-NEXT:    vmovaps 32(%r9), %xmm3
+; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm3[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm12, %ymm3
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6],ymm3[7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm2 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT:    vunpckhps {{.*#+}} ymm13 = ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[6],ymm11[6],ymm13[7],ymm11[7]
-; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[1,2],ymm15[1,2],ymm11[5,6],ymm15[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5],ymm11[6,7]
-; AVX-NEXT:    vbroadcastss 148(%r8), %xmm13
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2,3],ymm11[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 148(%r9), %ymm13
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm13[3],ymm11[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm7 = mem[0,0,0,0]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} xmm11 = xmm13[0,0,0,0]
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm7 = xmm11[0],xmm7[0],xmm11[1],xmm7[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm11
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm11[0,1],ymm7[2,3],ymm11[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 160(%r8), %ymm6, %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5],ymm7[6,7]
-; AVX-NEXT:    vbroadcastss 160(%r9), %ymm7
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5],ymm6[6,7]
-; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm7 = ymm6[2],mem[2],ymm6[3],mem[3],ymm6[6],mem[6],ymm6[7],mem[7]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[1,2],ymm8[1,2],ymm6[5,6],ymm8[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
-; AVX-NEXT:    vbroadcastss 180(%r8), %xmm8
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3],ymm6[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 180(%r9), %ymm8
-; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3],ymm6[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; AVX-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm3 = mem[0,0,0,0]
-; AVX-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm6 = mem[0,0,0,0]
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm6
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3],ymm6[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 192(%r8), %ymm2, %ymm2
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
-; AVX-NEXT:    vbroadcastss 192(%r9), %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
-; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm6 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm4[1,2],ymm2[1,2],ymm4[5,6],ymm2[5,6]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm13[1,2],ymm1[1,2],ymm13[5,6],ymm1[5,6]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5],ymm3[6,7]
-; AVX-NEXT:    vbroadcastss 212(%r8), %xmm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 212(%r9), %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5,6,7]
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpckhps {{.*#+}} ymm3 = ymm9[2],ymm0[2],ymm9[3],ymm0[3],ymm9[6],ymm0[6],ymm9[7],ymm0[7]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm5[1,2],ymm1[1,2],ymm5[5,6],ymm1[5,6]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
-; AVX-NEXT:    vmovaps %ymm3, %ymm5
-; AVX-NEXT:    vbroadcastss 244(%r8), %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
-; AVX-NEXT:    vbroadcastss 244(%r9), %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX-NEXT:    vbroadcastss 52(%r8), %xmm12
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm12[2,3],ymm3[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 52(%r9), %ymm12
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm12[3],ymm3[4,5,6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vbroadcastss (%rcx), %xmm0
-; AVX-NEXT:    vbroadcastss (%rdx), %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm13[3,0],ymm1[7,4],ymm13[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vbroadcastss 64(%rcx), %xmm0
+; AVX-NEXT:    vbroadcastss 64(%rdx), %xmm1
 ; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, (%r8), %ymm1, %ymm1
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 64(%r8), %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss (%r9), %ymm1
+; AVX-NEXT:    vbroadcastss 64(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -7053,303 +6942,321 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,1,3,3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; AVX-NEXT:    vmovaps (%r9), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX-NEXT:    vmovaps 64(%r9), %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm10[3,0],ymm0[7,4],ymm10[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm14[2,3],ymm0[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm5[1,2],ymm4[1,2],ymm5[5,6],ymm4[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 84(%r8), %xmm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 84(%r9), %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm4[3,0],ymm5[3,0],ymm4[7,4],ymm5[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vbroadcastss 32(%rcx), %xmm0
-; AVX-NEXT:    vbroadcastss 32(%rdx), %xmm1
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 32(%r8), %ymm1, %ymm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 32(%r9), %ymm1
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[0,0,0,0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm6[0,0,0,0]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 96(%r8), %ymm0, %ymm0
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 96(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,1,3,3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; AVX-NEXT:    vmovaps 32(%r9), %xmm1
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm1[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX-NEXT:    vmovaps 96(%r9), %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm0 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm0 = mem[2,3],ymm0[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm9[1,2],ymm8[1,2],ymm9[5,6],ymm8[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 116(%r8), %xmm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 116(%r9), %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm8[3,0],ymm9[3,0],ymm8[7,4],ymm9[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vbroadcastss 64(%rcx), %xmm0
-; AVX-NEXT:    vbroadcastss 64(%rdx), %xmm1
+; AVX-NEXT:    vbroadcastss 128(%rcx), %xmm0
+; AVX-NEXT:    vbroadcastss 128(%rdx), %xmm1
 ; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 64(%r8), %ymm1, %ymm1
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 128(%r8), %ymm1, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX-NEXT:    vbroadcastss 64(%r9), %ymm1
+; AVX-NEXT:    vbroadcastss 128(%r9), %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm1 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5],ymm3[6,7]
-; AVX-NEXT:    vmovaps 64(%r9), %xmm3
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm3[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm3[1],ymm1[2,3,4,5,6],ymm3[7]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vmovaps 128(%r9), %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm3 = mem[2,3],ymm3[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm4 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3,4,5],ymm4[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0],ymm4[1],ymm3[2,3,4,5,6],ymm4[7]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm11[1,2],ymm10[1,2],ymm11[5,6],ymm10[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 148(%r8), %xmm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 148(%r9), %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
+; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm10[3,0],ymm11[3,0],ymm10[7,4],ymm11[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm3 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT:    vpermilps {{.*#+}} xmm4 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3,4,5],ymm4[6,7]
-; AVX-NEXT:    vmovaps 96(%r9), %xmm4
-; AVX-NEXT:    vshufps {{.*#+}} xmm8 = xmm4[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm8, %ymm4
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4,5,6],ymm4[7]
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm4[0,0,0,0]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm3[0,0,0,0]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 160(%r8), %ymm0, %ymm0
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 160(%r9), %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vmovaps 160(%r9), %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm4 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm12[2,3],ymm4[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm8 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,3,4,5],ymm8[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm8 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3,4,5,6],ymm8[7]
-; AVX-NEXT:    vbroadcastss 128(%rcx), %xmm8
-; AVX-NEXT:    vbroadcastss 128(%rdx), %xmm9
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm9 = xmm0[0],mem[0],xmm0[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm11
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm8[2,3],ymm11[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 128(%r8), %ymm9, %ymm9
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
-; AVX-NEXT:    vbroadcastss 128(%r9), %ymm9
-; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm4[1,2],ymm3[1,2],ymm4[5,6],ymm3[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 180(%r8), %xmm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 180(%r9), %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm3[3,0],ymm4[3,0],ymm3[7,4],ymm4[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm9 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
-; AVX-NEXT:    vpermilps {{.*#+}} xmm11 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm11, %ymm11
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5],ymm11[6,7]
-; AVX-NEXT:    vmovaps 128(%r9), %xmm11
-; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm11[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
-; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm11[1],ymm9[2,3,4,5,6],ymm11[7]
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm11 = ymm15[3,0],mem[3,0],ymm15[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm10 = mem[2,3],ymm11[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm11 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3,4,5],ymm11[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm11 = ymm11[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0],ymm11[1],ymm10[2,3,4,5,6],ymm11[7]
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm11 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm11 = xmm13[2],mem[2],xmm13[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm11 = xmm11[2,3,2,3]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm11
-; AVX-NEXT:    vpermilps {{.*#+}} xmm12 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm12, %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3,4,5],ymm12[6,7]
-; AVX-NEXT:    vmovaps 160(%r9), %xmm12
-; AVX-NEXT:    vshufps {{.*#+}} xmm13 = xmm12[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
-; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0],ymm12[1],ymm11[2,3,4,5,6],ymm12[7]
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm4[0,0,0,0]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm3[0,0,0,0]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 192(%r8), %ymm0, %ymm0
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 192(%r9), %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vmovaps 192(%r9), %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm12 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm7[2,3],ymm12[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm12 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3,4,5],ymm12[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm12 = ymm12[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0],ymm12[1],ymm7[2,3,4,5,6],ymm12[7]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm5[1,2],ymm3[1,2],ymm5[5,6],ymm3[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastss 212(%r8), %xmm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 212(%r9), %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm3[3,0],ymm5[3,0],ymm3[7,4],ymm5[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6],ymm1[7]
+; AVX-NEXT:    vbroadcastss 224(%rcx), %xmm0
+; AVX-NEXT:    vbroadcastss 224(%rdx), %xmm5
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm5 = xmm1[0],mem[0],xmm1[1],mem[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm6
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3],ymm6[4,5,6,7]
+; AVX-NEXT:    vinsertf128 $1, 224(%r8), %ymm5, %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3],ymm5[4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastss 224(%r9), %ymm5
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm5[5],ymm0[6,7]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm12 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm12 = xmm12[2,3,2,3]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm5 = xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[2,3,2,3]
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT:    vpermilps {{.*#+}} xmm13 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm13, %ymm13
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
-; AVX-NEXT:    vmovaps 192(%r9), %xmm13
-; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm13[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm13, %ymm14, %ymm13
-; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4,5,6],ymm13[7]
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm13 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm13 = ymm13[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm6[2,3],ymm13[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm13 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm13 = ymm13[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm13[0,1],ymm2[2,3,4,5],ymm13[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm13 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm13 = ymm13[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm13[1],ymm2[2,3,4,5,6],ymm13[7]
-; AVX-NEXT:    vbroadcastss 224(%rcx), %xmm13
-; AVX-NEXT:    vbroadcastss 224(%rdx), %xmm14
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm14 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm14 = xmm0[0],mem[0],xmm0[1],mem[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3],ymm15[4,5,6,7]
-; AVX-NEXT:    vinsertf128 $1, 224(%r8), %ymm14, %ymm14
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5],ymm13[6,7]
-; AVX-NEXT:    vbroadcastss 224(%r9), %ymm14
-; AVX-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4],ymm14[5],ymm13[6,7]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm14 # 16-byte Folded Reload
-; AVX-NEXT:    # xmm14 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX-NEXT:    vshufps {{.*#+}} xmm14 = xmm14[2,3,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT:    vpermilps {{.*#+}} xmm6 = mem[2,1,3,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm6, %ymm6
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3,4,5],ymm6[6,7]
+; AVX-NEXT:    vmovaps 224(%r9), %xmm6
+; AVX-NEXT:    vshufps {{.*#+}} xmm7 = xmm6[0,2,2,3]
+; AVX-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
+; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4,5,6],ymm6[7]
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm14
-; AVX-NEXT:    vpermilps {{.*#+}} xmm15 = mem[2,1,3,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm15, %ymm15
-; AVX-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5],ymm15[6,7]
-; AVX-NEXT:    vmovaps 224(%r9), %xmm15
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm15[0,2,2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm0
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0],ymm0[1],ymm14[2,3,4,5,6],ymm0[7]
-; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT:    vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm14 = ymm6[3,0],mem[3,0],ymm6[7,4],mem[7,4]
-; AVX-NEXT:    vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3],ymm14[2,3]
-; AVX-NEXT:    vperm2f128 $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm14 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm14 = ymm14[2,1,3,3,6,5,7,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm14[0,1],ymm5[2,3,4,5],ymm14[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = mem[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm14 = ymm14[0,2,2,3,4,6,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm14[1],ymm5[2,3,4,5,6],ymm14[7]
+; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT:    # ymm6 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm0[1,2],ymm1[1,2],ymm0[5,6],ymm1[5,6]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm7 = ymm7[0,2,1,3,4,6,5,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
+; AVX-NEXT:    vbroadcastss 244(%r8), %xmm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
+; AVX-NEXT:    vbroadcastss 244(%r9), %ymm8
+; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm6[2,3],ymm8[2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[2,1,3,3,6,5,7,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3,4,5],ymm8[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2,3,4,5,6],ymm8[7]
 ; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT:    vmovaps %ymm5, 1504(%rax)
-; AVX-NEXT:    vmovaps %ymm0, 1408(%rax)
-; AVX-NEXT:    vmovaps %ymm13, 1344(%rax)
-; AVX-NEXT:    vmovaps %ymm2, 1312(%rax)
-; AVX-NEXT:    vmovaps %ymm12, 1216(%rax)
-; AVX-NEXT:    vmovaps %ymm7, 1120(%rax)
-; AVX-NEXT:    vmovaps %ymm11, 1024(%rax)
-; AVX-NEXT:    vmovaps %ymm10, 928(%rax)
-; AVX-NEXT:    vmovaps %ymm9, 832(%rax)
-; AVX-NEXT:    vmovaps %ymm8, 768(%rax)
-; AVX-NEXT:    vmovaps %ymm4, 736(%rax)
-; AVX-NEXT:    vmovaps %ymm3, 640(%rax)
-; AVX-NEXT:    vmovaps %ymm1, 544(%rax)
+; AVX-NEXT:    vmovaps %ymm6, 1504(%rax)
+; AVX-NEXT:    vmovaps %ymm7, 1472(%rax)
+; AVX-NEXT:    vmovaps %ymm5, 1408(%rax)
+; AVX-NEXT:    vmovaps %ymm2, 1344(%rax)
+; AVX-NEXT:    vmovaps %ymm3, 1312(%rax)
+; AVX-NEXT:    vmovaps %ymm4, 1280(%rax)
+; AVX-NEXT:    vmovaps %ymm9, 1216(%rax)
+; AVX-NEXT:    vmovaps %ymm10, 1152(%rax)
+; AVX-NEXT:    vmovaps %ymm15, 1120(%rax)
+; AVX-NEXT:    vmovaps %ymm14, 1088(%rax)
+; AVX-NEXT:    vmovaps %ymm13, 1024(%rax)
+; AVX-NEXT:    vmovaps %ymm11, 960(%rax)
+; AVX-NEXT:    vmovaps %ymm12, 928(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 448(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 896(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 384(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 832(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 352(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 768(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 256(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 736(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 192(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 704(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 160(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 640(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 576(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, (%rax)
+; AVX-NEXT:    vmovaps %ymm0, 544(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 1472(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 512(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 1280(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 448(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 1152(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 384(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 1088(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 352(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 960(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 320(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 896(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 256(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 704(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 192(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 576(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 160(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 512(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 128(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 320(%rax)
+; AVX-NEXT:    vmovaps %ymm0, 64(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT:    vmovaps %ymm0, 128(%rax)
+; AVX-NEXT:    vmovaps %ymm0, (%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 1440(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7382,7 +7289,7 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps %ymm0, 96(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX-NEXT:    addq $2504, %rsp # imm = 0x9C8
+; AVX-NEXT:    addq $2056, %rsp # imm = 0x808
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
index 9ff685694ce02..bead2c94cf121 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
@@ -2059,83 +2059,83 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX-LABEL: store_i32_stride7_vf16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    subq $456, %rsp # imm = 0x1C8
+; AVX-NEXT:    subq $472, %rsp # imm = 0x1D8
 ; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT:    vmovaps 32(%rdi), %ymm9
-; AVX-NEXT:    vmovaps 32(%rsi), %ymm6
-; AVX-NEXT:    vmovaps 32(%rdx), %ymm7
-; AVX-NEXT:    vmovaps 32(%rcx), %ymm8
+; AVX-NEXT:    vmovaps 32(%rdi), %ymm8
+; AVX-NEXT:    vmovaps 32(%rsi), %ymm5
+; AVX-NEXT:    vmovaps 32(%rdx), %ymm6
+; AVX-NEXT:    vmovaps 32(%rcx), %ymm7
 ; AVX-NEXT:    vmovaps 32(%r8), %ymm0
-; AVX-NEXT:    vmovaps 32(%r9), %ymm1
-; AVX-NEXT:    vmovaps 32(%rax), %ymm2
-; AVX-NEXT:    vunpckhps {{.*#+}} ymm3 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm6[1],ymm9[1],ymm6[3],ymm9[3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2,0],ymm9[4,5],ymm4[6,4]
-; AVX-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm4[2,3],ymm3[2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm1[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[1,1,2,2,5,5,6,6]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4,5,6],ymm4[7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7]
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 32(%rax), %ymm1
+; AVX-NEXT:    vunpckhps {{.*#+}} ymm2 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
+; AVX-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm5[1],ymm8[1],ymm5[3],ymm8[3]
+; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm8[0,1],ymm3[2,0],ymm8[4,5],ymm3[6,4]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm3[2,3],ymm2[2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm0[6,7]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[1,1,2,2,5,5,6,6]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3,4,5,6],ymm3[7]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4],ymm3[5],ymm2[6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovaps 32(%rax), %xmm4
+; AVX-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX-NEXT:    vmovaps 32(%r9), %xmm10
-; AVX-NEXT:    vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps %xmm10, (%rsp) # 16-byte Spill
 ; AVX-NEXT:    vmovaps 32(%r8), %xmm11
 ; AVX-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm13
-; AVX-NEXT:    vmovaps 32(%rsi), %xmm12
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm12[0],xmm13[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm13[2,1]
-; AVX-NEXT:    vmovaps %xmm13, (%rsp) # 16-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
-; AVX-NEXT:    vmovaps 32(%rcx), %xmm5
-; AVX-NEXT:    vmovaps 32(%rdx), %xmm15
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm15[0],xmm5[0],xmm15[1],xmm5[1]
-; AVX-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps %xmm5, %xmm14
-; AVX-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm4[0,1,0,1]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm12
+; AVX-NEXT:    vmovaps 32(%rsi), %xmm9
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm9[0],xmm12[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm12[2,1]
+; AVX-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX-NEXT:    vmovaps 32(%rcx), %xmm13
+; AVX-NEXT:    vmovaps 32(%rdx), %xmm14
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
+; AVX-NEXT:    vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm3[0,1,0,1]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm3 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
 ; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT:    vmovaps 32(%rax), %xmm5
-; AVX-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6],ymm3[7]
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm10[1,1],xmm11[1,1]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[2]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6],ymm2[7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm10[1,1],xmm11[1,1]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5],ymm2[6,7]
+; AVX-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm12[1],xmm9[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm9[1,1],xmm3[0,2]
 ; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5],ymm3[6,7]
-; AVX-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm13[1],xmm12[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm12[1,1],xmm4[0,2]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
-; AVX-NEXT:    vinsertps {{.*#+}} xmm5 = zero,xmm15[1],xmm14[1],zero
-; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1,2],ymm4[3,4,5,6,7]
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5],ymm4[6,7]
-; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm6[1,1],ymm9[1,1],ymm6[5,5],ymm9[5,5]
-; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm7[1,1],ymm8[1,1],ymm7[5,5],ymm8[5,5]
-; AVX-NEXT:    vmovaps %ymm8, %ymm13
+; AVX-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm14[1],xmm13[1],zero
+; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2],ymm3[3,4,5,6,7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5],ymm3[6,7]
+; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm5[1,1],ymm8[1,1],ymm5[5,5],ymm8[5,5]
+; AVX-NEXT:    vmovaps %ymm8, %ymm4
+; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm6[1,1],ymm7[1,1],ymm6[5,5],ymm7[5,5]
 ; AVX-NEXT:    vmovaps %ymm7, %ymm11
 ; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6],ymm3[7]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,1],ymm1[6,4],ymm0[6,5]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
+; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6],ymm2[7]
+; AVX-NEXT:    vmovaps 32(%r9), %ymm3
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[2],ymm0[2]
+; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm3[2,0],ymm0[2,1],ymm3[6,4],ymm0[6,5]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm0[2,3]
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6],ymm0[7]
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6],ymm0[7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vmovaps (%rdi), %xmm5
-; AVX-NEXT:    vmovaps (%rsi), %xmm14
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm5[1],xmm14[1]
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm14[1,1],xmm0[0,2]
+; AVX-NEXT:    vmovaps (%rdi), %xmm14
+; AVX-NEXT:    vmovaps (%rsi), %xmm13
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm14[1],xmm13[1]
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm13[1,1],xmm0[0,2]
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX-NEXT:    vmovaps (%rcx), %xmm7
 ; AVX-NEXT:    vmovaps (%rdx), %xmm12
@@ -2144,9 +2144,9 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7]
 ; AVX-NEXT:    vmovaps (%r9), %xmm3
 ; AVX-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovaps (%r8), %xmm4
-; AVX-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm4[1,1]
+; AVX-NEXT:    vmovaps (%r8), %xmm5
+; AVX-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm5[1,1]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
 ; AVX-NEXT:    vmovaps (%rax), %xmm2
 ; AVX-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -2154,12 +2154,11 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
-; AVX-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm14[0],xmm5[0]
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm5[2,1]
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm13[0],xmm14[0]
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm14[2,1]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
 ; AVX-NEXT:    vunpcklps {{.*#+}} xmm2 = xmm12[0],xmm7[0],xmm12[1],xmm7[1]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1,0,1]
@@ -2195,10 +2194,10 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7]
 ; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm13[3,3],ymm11[3,3],ymm13[7,7],ymm11[7,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm0 = ymm11[3,3],ymm6[3,3],ymm11[7,7],ymm6[7,7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm11[3,3],ymm6[3,3],ymm11[7,7],ymm6[7,7]
+; AVX-NEXT:    vshufps {{.*#+}} ymm2 = ymm4[3,3],ymm11[3,3],ymm4[7,7],ymm11[7,7]
 ; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
 ; AVX-NEXT:    vbroadcastss 60(%r8), %ymm2
@@ -2218,7 +2217,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm15 = ymm1[0],ymm0[1,2,3,4],ymm1[5,6,7]
-; AVX-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX-NEXT:    vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm0 = xmm0[3,3],mem[3,3]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
@@ -2227,32 +2226,32 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6],ymm1[7]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX-NEXT:    vmovaps (%rsp), %xmm6 # 16-byte Reload
 ; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm6[2,2,2,2]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
 ; AVX-NEXT:    vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
 ; AVX-NEXT:    vbroadcastsd 40(%rax), %ymm2
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[3,3],xmm14[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm14[3,3],xmm13[3,3]
 ; AVX-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm2 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm2 = xmm12[2],mem[2],xmm12[3],mem[3]
 ; AVX-NEXT:    vinsertf128 $1, %xmm12, %ymm2, %ymm2
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6],ymm2[7]
-; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm14[2,2,2,2]
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm13[2,2,2,2]
 ; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
 ; AVX-NEXT:    vblendps {{.*#+}} xmm2 = xmm12[0,1,2],xmm2[3]
 ; AVX-NEXT:    vbroadcastsd 8(%rax), %ymm3
 ; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4],ymm1[5,6,7]
-; AVX-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm2 # 32-byte Folded Reload
-; AVX-NEXT:    # ymm2 = ymm11[0],mem[0],ymm11[1],mem[1],ymm11[4],mem[4],ymm11[5],mem[5]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT:    vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm11[0],ymm2[1],ymm11[1],ymm2[4],ymm11[4],ymm2[5],ymm11[5]
+; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm13[0],ymm3[2],ymm13[2]
-; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm13[3,1],ymm3[0,2],ymm13[7,5],ymm3[4,6]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
+; AVX-NEXT:    vshufps {{.*#+}} ymm3 = ymm5[3,1],ymm3[0,2],ymm5[7,5],ymm3[4,6]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
 ; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm4[3,3],xmm6[3,3]
 ; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
@@ -2262,7 +2261,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm4 = ymm7[3,1],ymm5[0,2],ymm7[7,5],ymm5[4,6]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
-; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm12[3,3],xmm14[3,3]
+; AVX-NEXT:    vshufps {{.*#+}} xmm4 = xmm12[3,3],xmm13[3,3]
 ; AVX-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
 ; AVX-NEXT:    # xmm4 = xmm4[0,1,2],mem[3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3],ymm3[4,5,6,7]
@@ -2290,7 +2289,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vmovaps %ymm0, 416(%rax)
 ; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX-NEXT:    vmovaps %ymm0, 384(%rax)
-; AVX-NEXT:    addq $456, %rsp # imm = 0x1C8
+; AVX-NEXT:    addq $472, %rsp # imm = 0x1D8
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -4619,7 +4618,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,0],ymm4[4,5],ymm1[6,4]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,1,2,2,5,5,6,6]
 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
 ; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll
index 1061c08dcceda..8d68f88249a9e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll
@@ -168,11 +168,11 @@ define void @store_i64_stride2_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.ve
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[3],ymm1[3]
 ; AVX-NEXT:    vmovapd %ymm1, 32(%rdx)
-; AVX-NEXT:    vmovapd %ymm0, (%rdx)
+; AVX-NEXT:    vmovaps %ymm0, (%rdx)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -353,16 +353,16 @@ define void @store_i64_stride2_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.ve
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm1[1]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[3],ymm2[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm3 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[3],ymm3[3]
 ; AVX-NEXT:    vmovapd %ymm3, 96(%rdx)
 ; AVX-NEXT:    vmovapd %ymm2, 32(%rdx)
 ; AVX-NEXT:    vmovaps %ymm1, 64(%rdx)
-; AVX-NEXT:    vmovapd %ymm0, (%rdx)
+; AVX-NEXT:    vmovaps %ymm0, (%rdx)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
@@ -637,24 +637,24 @@ define void @store_i64_stride2_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.v
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm1[1]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm5[0],xmm1[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm4 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm5 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[3],ymm4[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm5 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm6 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[3],ymm5[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm6 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[3],ymm6[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm8 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[3],ymm7[3]
 ; AVX-NEXT:    vmovapd %ymm7, 32(%rdx)
 ; AVX-NEXT:    vmovapd %ymm6, 96(%rdx)
 ; AVX-NEXT:    vmovapd %ymm5, 160(%rdx)
 ; AVX-NEXT:    vmovapd %ymm4, 224(%rdx)
 ; AVX-NEXT:    vmovaps %ymm1, 64(%rdx)
-; AVX-NEXT:    vmovapd %ymm0, (%rdx)
+; AVX-NEXT:    vmovaps %ymm0, (%rdx)
 ; AVX-NEXT:    vmovaps %ymm2, 128(%rdx)
 ; AVX-NEXT:    vmovaps %ymm3, 192(%rdx)
 ; AVX-NEXT:    vzeroupper
@@ -1122,7 +1122,7 @@ define void @store_i64_stride2_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.v
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX-NEXT:    vmovaps 128(%rsi), %xmm1
 ; AVX-NEXT:    vmovaps 128(%rdi), %xmm2
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm2[1],xmm1[1]
@@ -1158,29 +1158,29 @@ define void @store_i64_stride2_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.v
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm8[1],xmm7[1]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm8[0],xmm7[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm7, %ymm7
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm8 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[3],ymm8[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[3],ymm9[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm11 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[3],ymm10[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm11 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[3],ymm11[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm13 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm13 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm12 = ymm13[0],ymm12[0],ymm13[3],ymm12[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm13 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm13 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm14 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm13 = ymm14[0],ymm13[0],ymm14[3],ymm13[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm15 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm14 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm15 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm14 = ymm15[0],ymm14[0],ymm15[3],ymm14[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm15 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm15 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm15[0],ymm0[3],ymm15[3]
 ; AVX-NEXT:    vmovapd %ymm0, 416(%rdx)
 ; AVX-NEXT:    vmovapd %ymm14, 352(%rdx)
@@ -2175,55 +2175,55 @@ define void @store_i64_stride2_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.v
 ; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
 ; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
 ; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[3],ymm0[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm8 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[3],ymm0[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm8 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm8 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[3],ymm8[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm9 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[3],ymm9[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm10 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm11 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[3],ymm10[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm11 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[3],ymm11[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm12 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm13 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm13 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm12 = ymm13[0],ymm12[0],ymm13[3],ymm12[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm13 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm13 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm14 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm13 = ymm14[0],ymm13[0],ymm14[3],ymm13[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm14 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm15 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm14 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm15 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm14 = ymm15[0],ymm14[0],ymm15[3],ymm14[3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm15 = mem[2,3,2,3]
-; AVX-NEXT:    vperm2f128 {{.*#+}} ymm7 = mem[2,3,2,3]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm15 = mem[0,1,0,1]
+; AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = mem[0,1,0,1]
 ; AVX-NEXT:    vshufpd {{.*#+}} ymm7 = ymm7[0],ymm15[0],ymm7[3],ymm15[3]
 ; AVX-NEXT:    vmovapd %ymm7, 992(%rdx)
 ; AVX-NEXT:    vmovapd %ymm14, 928(%rdx)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 887db41dcf8ad..6d499e17bfbc6 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -6368,13 +6368,13 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-LABEL: store_i8_stride6_vf64:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm7
+; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm5
 ; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm6
 ; AVX512BW-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT:    vpshufb %ymm5, %ymm1, %ymm0
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
 ; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512BW-NEXT:    vpshufb %ymm5, %ymm2, %ymm3
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm2, %ymm3
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
 ; AVX512BW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
@@ -6382,59 +6382,57 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vpermw %ymm3, %ymm8, %ymm3
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm9
 ; AVX512BW-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-NEXT:    vpshufb %ymm11, %ymm3, %ymm0
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm12 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-NEXT:    vpshufb %ymm12, %ymm3, %ymm0
 ; AVX512BW-NEXT:    vmovdqa 32(%rdx), %ymm4
-; AVX512BW-NEXT:    vpshufb %ymm11, %ymm4, %ymm10
+; AVX512BW-NEXT:    vpshufb %ymm12, %ymm4, %ymm10
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm10[0],ymm0[0],ymm10[1],ymm0[1],ymm10[2],ymm0[2],ymm10[3],ymm0[3],ymm10[4],ymm0[4],ymm10[5],ymm0[5],ymm10[6],ymm0[6],ymm10[7],ymm0[7],ymm10[16],ymm0[16],ymm10[17],ymm0[17],ymm10[18],ymm0[18],ymm10[19],ymm0[19],ymm10[20],ymm0[20],ymm10[21],ymm0[21],ymm10[22],ymm0[22],ymm10[23],ymm0[23]
 ; AVX512BW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15],ymm4[24],ymm3[24],ymm4[25],ymm3[25],ymm4[26],ymm3[26],ymm4[27],ymm3[27],ymm4[28],ymm3[28],ymm4[29],ymm3[29],ymm4[30],ymm3[30],ymm4[31],ymm3[31]
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-NEXT:    vpermw %ymm10, %ymm12, %ymm10
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} ymm11 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-NEXT:    vpermw %ymm10, %ymm11, %ymm10
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm0
 ; AVX512BW-NEXT:    movl $613566756, %r10d # imm = 0x24924924
 ; AVX512BW-NEXT:    kmovd %r10d, %k1
 ; AVX512BW-NEXT:    vmovdqu16 %zmm9, %zmm0 {%k1}
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm7[4,5,6,7,4,5,6,7]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[4,5,6,7,4,5,6,7]
 ; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm14 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,10,13,12,11,14,0,0,15,10,13,12,11,14,0,0,15]
-; AVX512BW-NEXT:    vpshufb %zmm14, %zmm9, %zmm9
-; AVX512BW-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512BW-NEXT:    vpshufb %zmm14, %zmm5, %zmm5
+; AVX512BW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[2,2,2,3,6,6,6,7]
 ; AVX512BW-NEXT:    movl $-1840700270, %r10d # imm = 0x92492492
 ; AVX512BW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-NEXT:    vmovdqu16 %zmm9, %zmm0 {%k2}
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm6[4,5,6,7,4,5,6,7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm9 = zmm9[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512BW-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512BW-NEXT:    vmovdqu16 %zmm5, %zmm0 {%k2}
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[4,5,6,7,4,5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm5 = zmm5[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512BW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[2,2,2,3,6,6,6,7]
 ; AVX512BW-NEXT:    movabsq $-9076969306111049208, %r10 # imm = 0x8208208208208208
 ; AVX512BW-NEXT:    kmovq %r10, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm9, %zmm0 {%k3}
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm0 {%k3}
 ; AVX512BW-NEXT:    vmovdqa (%rsi), %ymm9
-; AVX512BW-NEXT:    vpshufb %ymm5, %ymm9, %ymm13
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm9, %ymm5
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX512BW-NEXT:    vpshufb %ymm5, %ymm10, %ymm5
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[2],ymm13[2],ymm5[3],ymm13[3],ymm5[4],ymm13[4],ymm5[5],ymm13[5],ymm5[6],ymm13[6],ymm5[7],ymm13[7],ymm5[16],ymm13[16],ymm5[17],ymm13[17],ymm5[18],ymm13[18],ymm5[19],ymm13[19],ymm5[20],ymm13[20],ymm5[21],ymm13[21],ymm5[22],ymm13[22],ymm5[23],ymm13[23]
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm10, %ymm6
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[16],ymm5[16],ymm6[17],ymm5[17],ymm6[18],ymm5[18],ymm6[19],ymm5[19],ymm6[20],ymm5[20],ymm6[21],ymm5[21],ymm6[22],ymm5[22],ymm6[23],ymm5[23]
 ; AVX512BW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} ymm13 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
-; AVX512BW-NEXT:    vpermw %ymm13, %ymm8, %ymm8
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm8
-; AVX512BW-NEXT:    vmovdqa64 (%rcx), %ymm16
-; AVX512BW-NEXT:    vpshufb %ymm11, %ymm16, %ymm5
-; AVX512BW-NEXT:    vmovdqa64 (%rdx), %ymm17
-; AVX512BW-NEXT:    vpshufb %ymm11, %ymm17, %ymm13
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm13[0],ymm5[0],ymm13[1],ymm5[1],ymm13[2],ymm5[2],ymm13[3],ymm5[3],ymm13[4],ymm5[4],ymm13[5],ymm5[5],ymm13[6],ymm5[6],ymm13[7],ymm5[7],ymm13[16],ymm5[16],ymm13[17],ymm5[17],ymm13[18],ymm5[18],ymm13[19],ymm5[19],ymm13[20],ymm5[20],ymm13[21],ymm5[21],ymm13[22],ymm5[22],ymm13[23],ymm5[23]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
+; AVX512BW-NEXT:    vpermw %ymm6, %ymm8, %ymm6
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 (%rcx), %ymm17
+; AVX512BW-NEXT:    vpshufb %ymm12, %ymm17, %ymm5
+; AVX512BW-NEXT:    vmovdqa64 (%rdx), %ymm19
+; AVX512BW-NEXT:    vpshufb %ymm12, %ymm19, %ymm7
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[4],ymm5[4],ymm7[5],ymm5[5],ymm7[6],ymm5[6],ymm7[7],ymm5[7],ymm7[16],ymm5[16],ymm7[17],ymm5[17],ymm7[18],ymm5[18],ymm7[19],ymm5[19],ymm7[20],ymm5[20],ymm7[21],ymm5[21],ymm7[22],ymm5[22],ymm7[23],ymm5[23]
 ; AVX512BW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} ymm13 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
-; AVX512BW-NEXT:    vpermw %ymm13, %ymm12, %ymm12
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm5
-; AVX512BW-NEXT:    vmovdqu16 %zmm8, %zmm5 {%k1}
-; AVX512BW-NEXT:    vmovdqa (%r8), %ymm13
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm13, %zmm7, %zmm7
-; AVX512BW-NEXT:    vpshufb %zmm14, %zmm7, %zmm7
-; AVX512BW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[2,2,2,3,6,6,6,7]
-; AVX512BW-NEXT:    vmovdqu16 %zmm7, %zmm5 {%k2}
-; AVX512BW-NEXT:    vmovdqa (%r9), %ymm12
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm12, %zmm6, %zmm6
-; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm19[8],ymm17[8],ymm19[9],ymm17[9],ymm19[10],ymm17[10],ymm19[11],ymm17[11],ymm19[12],ymm17[12],ymm19[13],ymm17[13],ymm19[14],ymm17[14],ymm19[15],ymm17[15],ymm19[24],ymm17[24],ymm19[25],ymm17[25],ymm19[26],ymm17[26],ymm19[27],ymm17[27],ymm19[28],ymm17[28],ymm19[29],ymm17[29],ymm19[30],ymm17[30],ymm19[31],ymm17[31]
+; AVX512BW-NEXT:    vpermw %ymm7, %ymm11, %ymm7
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
+; AVX512BW-NEXT:    vmovdqu16 %zmm6, %zmm5 {%k1}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpshufb %zmm14, %zmm13, %zmm6
+; AVX512BW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
+; AVX512BW-NEXT:    vmovdqu16 %zmm6, %zmm5 {%k2}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm6 = zmm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
 ; AVX512BW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
 ; AVX512BW-NEXT:    vmovdqu8 %zmm6, %zmm5 {%k3}
 ; AVX512BW-NEXT:    vmovdqa64 (%rsi), %xmm21
@@ -6446,101 +6444,99 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vpshufb %xmm20, %xmm8, %xmm14
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm14 = xmm14[8],xmm6[8],xmm14[9],xmm6[9],xmm14[10],xmm6[10],xmm14[11],xmm6[11],xmm14[12],xmm6[12],xmm14[13],xmm6[13],xmm14[14],xmm6[14],xmm14[15],xmm6[15]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm27 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512BW-NEXT:    vpermt2w %zmm14, %zmm27, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 (%rcx), %xmm25
-; AVX512BW-NEXT:    vmovdqa 32(%rcx), %xmm14
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm14, %xmm15
-; AVX512BW-NEXT:    vmovdqa64 (%rdx), %xmm26
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm25 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
+; AVX512BW-NEXT:    vpermt2w %zmm14, %zmm25, %zmm6
+; AVX512BW-NEXT:    vmovdqa64 (%rcx), %xmm23
+; AVX512BW-NEXT:    vmovdqa 32(%rcx), %xmm15
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm15, %xmm14
+; AVX512BW-NEXT:    vmovdqa64 (%rdx), %xmm24
 ; AVX512BW-NEXT:    vmovdqa64 32(%rdx), %xmm18
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm18, %xmm19
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm19[0],xmm15[0],xmm19[1],xmm15[1],xmm19[2],xmm15[2],xmm19[3],xmm15[3],xmm19[4],xmm15[4],xmm19[5],xmm15[5],xmm19[6],xmm15[6],xmm19[7],xmm15[7]
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm18[0],xmm14[0],xmm18[1],xmm14[1],xmm18[2],xmm14[2],xmm18[3],xmm14[3],xmm18[4],xmm14[4],xmm18[5],xmm14[5],xmm18[6],xmm14[6],xmm18[7],xmm14[7]
-; AVX512BW-NEXT:    vprold $16, %xmm19, %xmm19
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm15, %zmm19, %zmm15
-; AVX512BW-NEXT:    vpermq {{.*#+}} zmm15 = zmm15[0,0,0,1,4,4,4,5]
-; AVX512BW-NEXT:    vmovdqu16 %zmm15, %zmm6 {%k2}
-; AVX512BW-NEXT:    vmovdqa64 (%r8), %xmm23
-; AVX512BW-NEXT:    vmovdqa 32(%r8), %xmm15
-; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm19 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero,xmm15[4],zero,xmm15[5],zero,xmm15[6],zero,xmm15[7],zero
-; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm24 = xmm15[2,1,2,3]
-; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm24 = xmm24[0],zero,xmm24[1],zero,xmm24[2],zero,xmm24[3],zero,xmm24[4],zero,xmm24[5],zero,xmm24[6],zero,xmm24[7],zero
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm28 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
-; AVX512BW-NEXT:    vpermt2w %zmm24, %zmm28, %zmm19
-; AVX512BW-NEXT:    vmovdqu16 %zmm19, %zmm6 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 (%r9), %xmm24
-; AVX512BW-NEXT:    vmovdqa64 32(%r9), %xmm19
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm29 = xmm19[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm30 = xmm19[2,1,2,3]
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm30 = xmm30[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT:    vpermt2w %zmm30, %zmm28, %zmm29
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm18, %xmm16
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm18[0],xmm15[0],xmm18[1],xmm15[1],xmm18[2],xmm15[2],xmm18[3],xmm15[3],xmm18[4],xmm15[4],xmm18[5],xmm15[5],xmm18[6],xmm15[6],xmm18[7],xmm15[7]
+; AVX512BW-NEXT:    vprold $16, %xmm16, %xmm16
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm14, %zmm16, %zmm14
+; AVX512BW-NEXT:    vpermq {{.*#+}} zmm14 = zmm14[0,0,0,1,4,4,4,5]
+; AVX512BW-NEXT:    vmovdqu16 %zmm14, %zmm6 {%k2}
+; AVX512BW-NEXT:    vmovdqa 32(%r8), %xmm14
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm16 = xmm14[0],zero,xmm14[1],zero,xmm14[2],zero,xmm14[3],zero,xmm14[4],zero,xmm14[5],zero,xmm14[6],zero,xmm14[7],zero
+; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm26 = xmm14[2,1,2,3]
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm26 = xmm26[0],zero,xmm26[1],zero,xmm26[2],zero,xmm26[3],zero,xmm26[4],zero,xmm26[5],zero,xmm26[6],zero,xmm26[7],zero
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm27 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
+; AVX512BW-NEXT:    vpermt2w %zmm26, %zmm27, %zmm16
+; AVX512BW-NEXT:    vmovdqu16 %zmm16, %zmm6 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 32(%r9), %xmm16
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm26 = xmm16[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm28 = xmm16[2,1,2,3]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm28 = xmm28[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-NEXT:    vpermt2w %zmm28, %zmm27, %zmm26
 ; AVX512BW-NEXT:    movabsq $585610922974906400, %rcx # imm = 0x820820820820820
 ; AVX512BW-NEXT:    kmovq %rcx, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm29, %zmm6 {%k3}
-; AVX512BW-NEXT:    vpshufb %xmm20, %xmm21, %xmm29
+; AVX512BW-NEXT:    vmovdqu8 %zmm26, %zmm6 {%k3}
+; AVX512BW-NEXT:    vpshufb %xmm20, %xmm21, %xmm26
 ; AVX512BW-NEXT:    vpshufb %xmm20, %xmm22, %xmm20
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm29 = xmm20[8],xmm29[8],xmm20[9],xmm29[9],xmm20[10],xmm29[10],xmm20[11],xmm29[11],xmm20[12],xmm29[12],xmm20[13],xmm29[13],xmm20[14],xmm29[14],xmm20[15],xmm29[15]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm26 = xmm20[8],xmm26[8],xmm20[9],xmm26[9],xmm20[10],xmm26[10],xmm20[11],xmm26[11],xmm20[12],xmm26[12],xmm20[13],xmm26[13],xmm20[14],xmm26[14],xmm20[15],xmm26[15]
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm20 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
-; AVX512BW-NEXT:    vpermt2w %zmm29, %zmm27, %zmm20
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm25, %xmm27
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm26, %xmm11
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm27[0],xmm11[1],xmm27[1],xmm11[2],xmm27[2],xmm11[3],xmm27[3],xmm11[4],xmm27[4],xmm11[5],xmm27[5],xmm11[6],xmm27[6],xmm11[7],xmm27[7]
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm27 = xmm26[0],xmm25[0],xmm26[1],xmm25[1],xmm26[2],xmm25[2],xmm26[3],xmm25[3],xmm26[4],xmm25[4],xmm26[5],xmm25[5],xmm26[6],xmm25[6],xmm26[7],xmm25[7]
-; AVX512BW-NEXT:    vprold $16, %xmm27, %xmm27
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm11, %zmm27, %zmm11
-; AVX512BW-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[0,0,0,1,4,4,4,5]
-; AVX512BW-NEXT:    vmovdqu16 %zmm11, %zmm20 {%k2}
-; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm11 = xmm23[0],zero,xmm23[1],zero,xmm23[2],zero,xmm23[3],zero,xmm23[4],zero,xmm23[5],zero,xmm23[6],zero,xmm23[7],zero
-; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm27 = xmm23[2,1,2,3]
-; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm27 = xmm27[0],zero,xmm27[1],zero,xmm27[2],zero,xmm27[3],zero,xmm27[4],zero,xmm27[5],zero,xmm27[6],zero,xmm27[7],zero
-; AVX512BW-NEXT:    vpermt2w %zmm27, %zmm28, %zmm11
-; AVX512BW-NEXT:    vmovdqu16 %zmm11, %zmm20 {%k1}
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm24[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm27 = xmm24[2,1,2,3]
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm27 = xmm27[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT:    vpermt2w %zmm27, %zmm28, %zmm11
-; AVX512BW-NEXT:    vmovdqu8 %zmm11, %zmm20 {%k3}
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm17[0],ymm16[0],ymm17[1],ymm16[1],ymm17[2],ymm16[2],ymm17[3],ymm16[3],ymm17[4],ymm16[4],ymm17[5],ymm16[5],ymm17[6],ymm16[6],ymm17[7],ymm16[7],ymm17[16],ymm16[16],ymm17[17],ymm16[17],ymm17[18],ymm16[18],ymm17[19],ymm16[19],ymm17[20],ymm16[20],ymm17[21],ymm16[21],ymm17[22],ymm16[22],ymm17[23],ymm16[23]
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm17 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
-; AVX512BW-NEXT:    vpermt2w %zmm11, %zmm17, %zmm16
+; AVX512BW-NEXT:    vpermt2w %zmm26, %zmm25, %zmm20
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm23, %xmm25
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm24, %xmm12
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm25[0],xmm12[1],xmm25[1],xmm12[2],xmm25[2],xmm12[3],xmm25[3],xmm12[4],xmm25[4],xmm12[5],xmm25[5],xmm12[6],xmm25[6],xmm12[7],xmm25[7]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm24[0],xmm23[0],xmm24[1],xmm23[1],xmm24[2],xmm23[2],xmm24[3],xmm23[3],xmm24[4],xmm23[4],xmm24[5],xmm23[5],xmm24[6],xmm23[6],xmm24[7],xmm23[7]
+; AVX512BW-NEXT:    vprold $16, %xmm25, %xmm25
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm12, %zmm25, %zmm12
+; AVX512BW-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[0,0,0,1,4,4,4,5]
+; AVX512BW-NEXT:    vmovdqu16 %zmm12, %zmm20 {%k2}
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm12 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
+; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm25 = xmm13[2,1,2,3]
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm25 = xmm25[0],zero,xmm25[1],zero,xmm25[2],zero,xmm25[3],zero,xmm25[4],zero,xmm25[5],zero,xmm25[6],zero,xmm25[7],zero
+; AVX512BW-NEXT:    vpermt2w %zmm25, %zmm27, %zmm12
+; AVX512BW-NEXT:    vmovdqu16 %zmm12, %zmm20 {%k1}
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm11[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm25 = xmm11[2,1,2,3]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm25[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-NEXT:    vpermt2w %zmm25, %zmm27, %zmm12
+; AVX512BW-NEXT:    vmovdqu8 %zmm12, %zmm20 {%k3}
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm19[0],ymm17[0],ymm19[1],ymm17[1],ymm19[2],ymm17[2],ymm19[3],ymm17[3],ymm19[4],ymm17[4],ymm19[5],ymm17[5],ymm19[6],ymm17[6],ymm19[7],ymm17[7],ymm19[16],ymm17[16],ymm19[17],ymm17[17],ymm19[18],ymm17[18],ymm19[19],ymm17[19],ymm19[20],ymm17[20],ymm19[21],ymm17[21],ymm19[22],ymm17[22],ymm19[23],ymm17[23]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm17 = xmm24[8],xmm23[8],xmm24[9],xmm23[9],xmm24[10],xmm23[10],xmm24[11],xmm23[11],xmm24[12],xmm23[12],xmm24[13],xmm23[13],xmm24[14],xmm23[14],xmm24[15],xmm23[15]
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm19 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
+; AVX512BW-NEXT:    vpermt2w %zmm12, %zmm19, %zmm17
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
-; AVX512BW-NEXT:    vpermt2w %zmm9, %zmm11, %zmm10
-; AVX512BW-NEXT:    vmovdqu16 %zmm16, %zmm10 {%k1}
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm12 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
+; AVX512BW-NEXT:    vpermt2w %zmm9, %zmm12, %zmm10
+; AVX512BW-NEXT:    vmovdqu16 %zmm17, %zmm10 {%k1}
 ; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
 ; AVX512BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpshufb %ymm9, %ymm13, %ymm13
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm23[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-NEXT:    vpshufb %ymm9, %ymm13, %ymm17
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm21 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
-; AVX512BW-NEXT:    vpermt2w %zmm13, %zmm21, %zmm16
+; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm21, %zmm13
 ; AVX512BW-NEXT:    movl $1227133513, %ecx # imm = 0x49249249
 ; AVX512BW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-NEXT:    vmovdqu16 %zmm16, %zmm10 {%k2}
+; AVX512BW-NEXT:    vmovdqu16 %zmm13, %zmm10 {%k2}
 ; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
 ; AVX512BW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpshufb %ymm13, %ymm12, %ymm12
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm24[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-NEXT:    vpermt2w %zmm12, %zmm21, %zmm16
+; AVX512BW-NEXT:    vpshufb %ymm13, %ymm11, %ymm17
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-NEXT:    vpermt2w %zmm17, %zmm21, %zmm11
 ; AVX512BW-NEXT:    movabsq $2342443691899625602, %rcx # imm = 0x2082082082082082
 ; AVX512BW-NEXT:    kmovq %rcx, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm16, %zmm10 {%k3}
+; AVX512BW-NEXT:    vmovdqu8 %zmm11, %zmm10 {%k3}
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm18[8],xmm14[8],xmm18[9],xmm14[9],xmm18[10],xmm14[10],xmm18[11],xmm14[11],xmm18[12],xmm14[12],xmm18[13],xmm14[13],xmm18[14],xmm14[14],xmm18[15],xmm14[15]
-; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm17, %zmm4
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm18[8],xmm15[8],xmm18[9],xmm15[9],xmm18[10],xmm15[10],xmm18[11],xmm15[11],xmm18[12],xmm15[12],xmm18[13],xmm15[13],xmm18[14],xmm15[14],xmm18[15],xmm15[15]
+; AVX512BW-NEXT:    vpermt2w %zmm3, %zmm19, %zmm4
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
 ; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
-; AVX512BW-NEXT:    vpermt2w %zmm1, %zmm11, %zmm2
+; AVX512BW-NEXT:    vpermt2w %zmm1, %zmm12, %zmm2
 ; AVX512BW-NEXT:    vmovdqu16 %zmm4, %zmm2 {%k1}
 ; AVX512BW-NEXT:    vmovdqa 32(%r8), %ymm1
 ; AVX512BW-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm14[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BW-NEXT:    vpermt2w %zmm1, %zmm21, %zmm3
 ; AVX512BW-NEXT:    vmovdqu16 %zmm3, %zmm2 {%k2}
 ; AVX512BW-NEXT:    vmovdqa 32(%r9), %ymm1
 ; AVX512BW-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm19[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm16[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512BW-NEXT:    vpermt2w %zmm1, %zmm21, %zmm3
 ; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k3}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, 256(%rax)
@@ -6554,198 +6550,194 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512BW-FCP-LABEL: store_i8_stride6_vf64:
 ; AVX512BW-FCP:       # %bb.0:
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rsi), %ymm6
-; AVX512BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm6, %ymm0
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdi), %ymm7
-; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm7, %ymm2
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm7[8],ymm6[8],ymm7[9],ymm6[9],ymm7[10],ymm6[10],ymm7[11],ymm6[11],ymm7[12],ymm6[12],ymm7[13],ymm6[13],ymm7[14],ymm6[14],ymm7[15],ymm6[15],ymm7[24],ymm6[24],ymm7[25],ymm6[25],ymm7[26],ymm6[26],ymm7[27],ymm6[27],ymm7[28],ymm6[28],ymm7[29],ymm6[29],ymm7[30],ymm6[30],ymm7[31],ymm6[31]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm1 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-FCP-NEXT:    vpermw %ymm3, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm5
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rcx), %ymm10
-; AVX512BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm10, %ymm0
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdx), %ymm11
-; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm11, %ymm4
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11],ymm11[12],ymm10[12],ymm11[13],ymm10[13],ymm11[14],ymm10[14],ymm11[15],ymm10[15],ymm11[24],ymm10[24],ymm11[25],ymm10[25],ymm11[26],ymm10[26],ymm11[27],ymm10[27],ymm11[28],ymm10[28],ymm11[29],ymm10[29],ymm11[30],ymm10[30],ymm11[31],ymm10[31]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm4 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-FCP-NEXT:    vpermw %ymm8, %ymm4, %ymm8
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
-; AVX512BW-FCP-NEXT:    movl $613566756, %eax # imm = 0x24924924
-; AVX512BW-FCP-NEXT:    kmovd %eax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm5, %zmm0 {%k1}
-; AVX512BW-FCP-NEXT:    vmovdqa (%rcx), %ymm5
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %xmm23
-; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rcx), %xmm17
-; AVX512BW-FCP-NEXT:    vpshufb %xmm3, %xmm17, %xmm8
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %xmm25
-; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rdx), %xmm21
-; AVX512BW-FCP-NEXT:    vpshufb %xmm3, %xmm21, %xmm9
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm21[0],xmm17[0],xmm21[1],xmm17[1],xmm21[2],xmm17[2],xmm21[3],xmm17[3],xmm21[4],xmm17[4],xmm21[5],xmm17[5],xmm21[6],xmm17[6],xmm21[7],xmm17[7]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm13 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm8, %zmm13, %zmm9
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %xmm30
-; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rsi), %xmm18
-; AVX512BW-FCP-NEXT:    vpbroadcastq {{.*#+}} xmm15 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm15, %xmm18, %xmm8
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %xmm31
-; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rdi), %xmm24
-; AVX512BW-FCP-NEXT:    vpshufb %xmm15, %xmm24, %xmm12
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm24[0],xmm18[0],xmm24[1],xmm18[1],xmm24[2],xmm18[2],xmm24[3],xmm18[3],xmm24[4],xmm18[4],xmm24[5],xmm18[5],xmm24[6],xmm18[6],xmm24[7],xmm18[7]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm16 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm12, %zmm16, %zmm8
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %xmm26
-; AVX512BW-FCP-NEXT:    vmovdqa64 32(%r8), %xmm20
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} xmm19 = [8,9,0,0,0,5,6,7]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm19, %xmm20, %xmm14
-; AVX512BW-FCP-NEXT:    vpmovzxbw {{.*#+}} xmm12 = xmm20[0],zero,xmm20[1],zero,xmm20[2],zero,xmm20[3],zero,xmm20[4],zero,xmm20[5],zero,xmm20[6],zero,xmm20[7],zero
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm28 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm14, %zmm28, %zmm12
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r9), %xmm27
-; AVX512BW-FCP-NEXT:    vmovdqa64 32(%r9), %xmm22
-; AVX512BW-FCP-NEXT:    vpshufb %xmm3, %xmm23, %xmm14
-; AVX512BW-FCP-NEXT:    vpshufb %xmm3, %xmm25, %xmm29
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm29 = xmm29[0],xmm14[0],xmm29[1],xmm14[1],xmm29[2],xmm14[2],xmm29[3],xmm14[3],xmm29[4],xmm14[4],xmm29[5],xmm14[5],xmm29[6],xmm14[6],xmm29[7],xmm14[7]
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm25[0],xmm23[0],xmm25[1],xmm23[1],xmm25[2],xmm23[2],xmm25[3],xmm23[3],xmm25[4],xmm23[4],xmm25[5],xmm23[5],xmm25[6],xmm23[6],xmm25[7],xmm23[7]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm29, %zmm13, %zmm14
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [u,8,u,9,u,10,u,11,u,4,u,5,u,6,u,7]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm15, %xmm30, %xmm13
-; AVX512BW-FCP-NEXT:    vpshufb %xmm15, %xmm31, %xmm15
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm31[0],xmm30[0],xmm31[1],xmm30[1],xmm31[2],xmm30[2],xmm31[3],xmm30[3],xmm31[4],xmm30[4],xmm31[5],xmm30[5],xmm31[6],xmm30[6],xmm31[7],xmm30[7]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm15, %zmm16, %zmm13
-; AVX512BW-FCP-NEXT:    vpshufb %xmm29, %xmm22, %xmm16
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm22[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm16, %zmm28, %zmm15
-; AVX512BW-FCP-NEXT:    vpshufb %xmm19, %xmm26, %xmm19
-; AVX512BW-FCP-NEXT:    vpmovzxbw {{.*#+}} xmm16 = xmm26[0],zero,xmm26[1],zero,xmm26[2],zero,xmm26[3],zero,xmm26[4],zero,xmm26[5],zero,xmm26[6],zero,xmm26[7],zero
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm19, %zmm28, %zmm16
-; AVX512BW-FCP-NEXT:    vpshufb %xmm29, %xmm27, %xmm29
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm27[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm29, %zmm28, %zmm19
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %ymm28
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm29 = ymm28[0],ymm5[0],ymm28[1],ymm5[1],ymm28[2],ymm5[2],ymm28[3],ymm5[3],ymm28[4],ymm5[4],ymm28[5],ymm5[5],ymm28[6],ymm5[6],ymm28[7],ymm5[7],ymm28[16],ymm5[16],ymm28[17],ymm5[17],ymm28[18],ymm5[18],ymm28[19],ymm5[19],ymm28[20],ymm5[20],ymm28[21],ymm5[21],ymm28[22],ymm5[22],ymm28[23],ymm5[23]
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm25 = xmm25[8],xmm23[8],xmm25[9],xmm23[9],xmm25[10],xmm23[10],xmm25[11],xmm23[11],xmm25[12],xmm23[12],xmm25[13],xmm23[13],xmm25[14],xmm23[14],xmm25[15],xmm23[15]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm29, %zmm1, %zmm25
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %ymm29
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm23 = xmm31[8],xmm30[8],xmm31[9],xmm30[9],xmm31[10],xmm30[10],xmm31[11],xmm30[11],xmm31[12],xmm30[12],xmm31[13],xmm30[13],xmm31[14],xmm30[14],xmm31[15],xmm30[15]
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %ymm30
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm31 = ymm30[0],ymm29[0],ymm30[1],ymm29[1],ymm30[2],ymm29[2],ymm30[3],ymm29[3],ymm30[4],ymm29[4],ymm30[5],ymm29[5],ymm30[6],ymm29[6],ymm30[7],ymm29[7],ymm30[16],ymm29[16],ymm30[17],ymm29[17],ymm30[18],ymm29[18],ymm30[19],ymm29[19],ymm30[20],ymm29[20],ymm30[21],ymm29[21],ymm30[22],ymm29[22],ymm30[23],ymm29[23]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm31, %zmm2, %zmm23
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[4],ymm10[4],ymm11[5],ymm10[5],ymm11[6],ymm10[6],ymm11[7],ymm10[7],ymm11[16],ymm10[16],ymm11[17],ymm10[17],ymm11[18],ymm10[18],ymm11[19],ymm10[19],ymm11[20],ymm10[20],ymm11[21],ymm10[21],ymm11[22],ymm10[22],ymm11[23],ymm10[23]
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm21[8],xmm17[8],xmm21[9],xmm17[9],xmm21[10],xmm17[10],xmm21[11],xmm17[11],xmm21[12],xmm17[12],xmm21[13],xmm17[13],xmm21[14],xmm17[14],xmm21[15],xmm17[15]
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %ymm17
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm11, %zmm1, %zmm10
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rsi), %ymm10
+; AVX512BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm0, %ymm10, %ymm1
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdi), %ymm11
+; AVX512BW-FCP-NEXT:    vpshufb %ymm0, %ymm11, %ymm2
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,2,2,3]
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11],ymm11[12],ymm10[12],ymm11[13],ymm10[13],ymm11[14],ymm10[14],ymm11[15],ymm10[15],ymm11[24],ymm10[24],ymm11[25],ymm10[25],ymm11[26],ymm10[26],ymm11[27],ymm10[27],ymm11[28],ymm10[28],ymm11[29],ymm10[29],ymm11[30],ymm10[30],ymm11[31],ymm10[31]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm0 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-FCP-NEXT:    vpermw %ymm3, %ymm0, %ymm3
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm3
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rcx), %ymm4
+; AVX512BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-FCP-NEXT:    vmovdqa (%rsi), %ymm5
+; AVX512BW-FCP-NEXT:    vmovdqa (%rdi), %ymm6
+; AVX512BW-FCP-NEXT:    vmovdqa (%rcx), %ymm7
+; AVX512BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %xmm21
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rcx), %xmm16
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %xmm22
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rdx), %xmm20
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %xmm24
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rsi), %xmm15
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %xmm26
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rdi), %xmm19
+; AVX512BW-FCP-NEXT:    vpshufb %xmm2, %xmm16, %xmm12
+; AVX512BW-FCP-NEXT:    vpshufb %xmm2, %xmm20, %xmm13
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm20[0],xmm16[0],xmm20[1],xmm16[1],xmm20[2],xmm16[2],xmm20[3],xmm16[3],xmm20[4],xmm16[4],xmm20[5],xmm16[5],xmm20[6],xmm16[6],xmm20[7],xmm16[7]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm18 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm12, %zmm18, %zmm13
+; AVX512BW-FCP-NEXT:    vpbroadcastq {{.*#+}} xmm23 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm23, %xmm15, %xmm12
+; AVX512BW-FCP-NEXT:    vpshufb %xmm23, %xmm19, %xmm14
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm14 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm19[0],xmm15[0],xmm19[1],xmm15[1],xmm19[2],xmm15[2],xmm19[3],xmm15[3],xmm19[4],xmm15[4],xmm19[5],xmm15[5],xmm19[6],xmm15[6],xmm19[7],xmm15[7]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm25 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm14, %zmm25, %zmm12
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%r8), %xmm28
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} xmm27 = [8,9,0,0,0,5,6,7]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm27, %xmm28, %xmm17
+; AVX512BW-FCP-NEXT:    vpmovzxbw {{.*#+}} xmm14 = xmm28[0],zero,xmm28[1],zero,xmm28[2],zero,xmm28[3],zero,xmm28[4],zero,xmm28[5],zero,xmm28[6],zero,xmm28[7],zero
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm30 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm17, %zmm30, %zmm14
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%r9), %xmm29
+; AVX512BW-FCP-NEXT:    vpshufb %xmm2, %xmm21, %xmm17
+; AVX512BW-FCP-NEXT:    vpshufb %xmm2, %xmm22, %xmm31
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm31 = xmm31[0],xmm17[0],xmm31[1],xmm17[1],xmm31[2],xmm17[2],xmm31[3],xmm17[3],xmm31[4],xmm17[4],xmm31[5],xmm17[5],xmm31[6],xmm17[6],xmm31[7],xmm17[7]
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm31, %zmm18, %zmm17
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm31 = [u,8,u,9,u,10,u,11,u,4,u,5,u,6,u,7]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm23, %xmm24, %xmm18
+; AVX512BW-FCP-NEXT:    vpshufb %xmm23, %xmm26, %xmm23
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm23 = xmm23[8],xmm18[8],xmm23[9],xmm18[9],xmm23[10],xmm18[10],xmm23[11],xmm18[11],xmm23[12],xmm18[12],xmm23[13],xmm18[13],xmm23[14],xmm18[14],xmm23[15],xmm18[15]
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm18 = xmm26[0],xmm24[0],xmm26[1],xmm24[1],xmm26[2],xmm24[2],xmm26[3],xmm24[3],xmm26[4],xmm24[4],xmm26[5],xmm24[5],xmm26[6],xmm24[6],xmm26[7],xmm24[7]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm23, %zmm25, %zmm18
+; AVX512BW-FCP-NEXT:    vpshufb %xmm31, %xmm29, %xmm25
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm23 = xmm29[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm25, %zmm30, %zmm23
+; AVX512BW-FCP-NEXT:    vpshufb %xmm27, %xmm8, %xmm27
+; AVX512BW-FCP-NEXT:    vpmovzxbw {{.*#+}} xmm25 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm27, %zmm30, %zmm25
+; AVX512BW-FCP-NEXT:    vpshufb %xmm31, %xmm9, %xmm31
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm27 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm31, %zmm30, %zmm27
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %ymm30
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm31 = ymm30[0],ymm7[0],ymm30[1],ymm7[1],ymm30[2],ymm7[2],ymm30[3],ymm7[3],ymm30[4],ymm7[4],ymm30[5],ymm7[5],ymm30[6],ymm7[6],ymm30[7],ymm7[7],ymm30[16],ymm7[16],ymm30[17],ymm7[17],ymm30[18],ymm7[18],ymm30[19],ymm7[19],ymm30[20],ymm7[20],ymm30[21],ymm7[21],ymm30[22],ymm7[22],ymm30[23],ymm7[23]
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm22 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm31, %zmm0, %zmm22
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm31 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[16],ymm5[16],ymm6[17],ymm5[17],ymm6[18],ymm5[18],ymm6[19],ymm5[19],ymm6[20],ymm5[20],ymm6[21],ymm5[21],ymm6[22],ymm5[22],ymm6[23],ymm5[23]
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm21 = xmm26[8],xmm24[8],xmm26[9],xmm24[9],xmm26[10],xmm24[10],xmm26[11],xmm24[11],xmm26[12],xmm24[12],xmm26[13],xmm24[13],xmm26[14],xmm24[14],xmm26[15],xmm24[15]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm24 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm31, %zmm24, %zmm21
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rdx), %ymm26
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm20[8],xmm16[8],xmm20[9],xmm16[9],xmm20[10],xmm16[10],xmm20[11],xmm16[11],xmm20[12],xmm16[12],xmm20[13],xmm16[13],xmm20[14],xmm16[14],xmm20[15],xmm16[15]
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm26[0],ymm4[0],ymm26[1],ymm4[1],ymm26[2],ymm4[2],ymm26[3],ymm4[3],ymm26[4],ymm4[4],ymm26[5],ymm4[5],ymm26[6],ymm4[6],ymm26[7],ymm4[7],ymm26[16],ymm4[16],ymm26[17],ymm4[17],ymm26[18],ymm4[18],ymm26[19],ymm4[19],ymm26[20],ymm4[20],ymm26[21],ymm4[21],ymm26[22],ymm4[22],ymm26[23],ymm4[23]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm20, %zmm0, %zmm16
 ; AVX512BW-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
 ; AVX512BW-FCP-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm17, %ymm21
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm26[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm26 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm21, %zmm26, %zmm11
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[4],ymm6[4],ymm7[5],ymm6[5],ymm7[6],ymm6[6],ymm7[7],ymm6[7],ymm7[16],ymm6[16],ymm7[17],ymm6[17],ymm7[18],ymm6[18],ymm7[19],ymm6[19],ymm7[20],ymm6[20],ymm7[21],ymm6[21],ymm7[22],ymm6[22],ymm7[23],ymm6[23]
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm24[8],xmm18[8],xmm24[9],xmm18[9],xmm24[10],xmm18[10],xmm24[11],xmm18[11],xmm24[12],xmm18[12],xmm24[13],xmm18[13],xmm24[14],xmm18[14],xmm24[15],xmm18[15]
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r9), %ymm21
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm7, %zmm2, %zmm6
-; AVX512BW-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
-; AVX512BW-FCP-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm21, %ymm18
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm27[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm18, %zmm26, %zmm7
-; AVX512BW-FCP-NEXT:    vmovdqa64 32(%r8), %ymm18
-; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm18, %ymm1
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm24
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm18 = xmm20[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm1, %zmm26, %zmm18
+; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm8, %ymm31
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm31, %zmm0, %zmm20
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[4],ymm10[4],ymm11[5],ymm10[5],ymm11[6],ymm10[6],ymm11[7],ymm10[7],ymm11[16],ymm10[16],ymm11[17],ymm10[17],ymm11[18],ymm10[18],ymm11[19],ymm10[19],ymm11[20],ymm10[20],ymm11[21],ymm10[21],ymm11[22],ymm10[22],ymm11[23],ymm10[23]
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm19[8],xmm15[8],xmm19[9],xmm15[9],xmm19[10],xmm15[10],xmm19[11],xmm15[11],xmm19[12],xmm15[12],xmm19[13],xmm15[13],xmm19[14],xmm15[14],xmm19[15],xmm15[15]
+; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
+; AVX512BW-FCP-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm11, %zmm24, %zmm10
+; AVX512BW-FCP-NEXT:    vpshufb %ymm19, %ymm9, %ymm11
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm11, %zmm0, %zmm15
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%r8), %ymm11
+; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm11, %ymm1
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm28[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm1, %zmm0, %zmm11
 ; AVX512BW-FCP-NEXT:    vmovdqa 32(%r9), %ymm1
-; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r9), %zmm2
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm22[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FCP-NEXT:    vpermt2w %zmm1, %zmm26, %zmm20
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm24[4,5,6,7,4,5,6,7]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm22 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,10,13,12,11,14,0,0,15,10,13,12,11,14,0,0,15]
-; AVX512BW-FCP-NEXT:    vpshufb %zmm22, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm19, %ymm1, %ymm1
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm19 = xmm29[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FCP-NEXT:    vpermt2w %zmm1, %zmm0, %zmm19
+; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm4, %ymm0
+; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm26, %ymm1
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm26[8],ymm4[8],ymm26[9],ymm4[9],ymm26[10],ymm4[10],ymm26[11],ymm4[11],ymm26[12],ymm4[12],ymm26[13],ymm4[13],ymm26[14],ymm4[14],ymm26[15],ymm4[15],ymm26[24],ymm4[24],ymm26[25],ymm4[25],ymm26[26],ymm4[26],ymm26[27],ymm4[27],ymm26[28],ymm4[28],ymm26[29],ymm4[29],ymm26[30],ymm4[30],ymm26[31],ymm4[31]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm24 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-FCP-NEXT:    vpermw %ymm1, %ymm24, %ymm1
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm4
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
+; AVX512BW-FCP-NEXT:    movl $613566756, %eax # imm = 0x24924924
+; AVX512BW-FCP-NEXT:    kmovd %eax, %k1
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm3, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,6,7,4,5,6,7]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,10,13,12,11,14,0,0,15,10,13,12,11,14,0,0,15]
+; AVX512BW-FCP-NEXT:    vpshufb %zmm1, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
 ; AVX512BW-FCP-NEXT:    movl $-1840700270, %eax # imm = 0x92492492
 ; AVX512BW-FCP-NEXT:    kmovd %eax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k2}
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[4,5,6,7,4,5,6,7]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm0, %zmm4 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%r9), %zmm0
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,6,7,4,5,6,7]
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
 ; AVX512BW-FCP-NEXT:    movabsq $-9076969306111049208, %rax # imm = 0x8208208208208208
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k3
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k3}
-; AVX512BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm26 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm26, %ymm29, %ymm1
-; AVX512BW-FCP-NEXT:    vpshufb %ymm26, %ymm30, %ymm26
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm26[0],ymm1[0],ymm26[1],ymm1[1],ymm26[2],ymm1[2],ymm26[3],ymm1[3],ymm26[4],ymm1[4],ymm26[5],ymm1[5],ymm26[6],ymm1[6],ymm26[7],ymm1[7],ymm26[16],ymm1[16],ymm26[17],ymm1[17],ymm26[18],ymm1[18],ymm26[19],ymm1[19],ymm26[20],ymm1[20],ymm26[21],ymm1[21],ymm26[22],ymm1[22],ymm26[23],ymm1[23]
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm26 = ymm30[8],ymm29[8],ymm30[9],ymm29[9],ymm30[10],ymm29[10],ymm30[11],ymm29[11],ymm30[12],ymm29[12],ymm30[13],ymm29[13],ymm30[14],ymm29[14],ymm30[15],ymm29[15],ymm30[24],ymm29[24],ymm30[25],ymm29[25],ymm30[26],ymm29[26],ymm30[27],ymm29[27],ymm30[28],ymm29[28],ymm30[29],ymm29[29],ymm30[30],ymm29[30],ymm30[31],ymm29[31]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm27 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-FCP-NEXT:    vpermw %ymm26, %ymm27, %ymm26
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm26, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm5, %ymm26
-; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm28, %ymm3
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm26[0],ymm3[1],ymm26[1],ymm3[2],ymm26[2],ymm3[3],ymm26[3],ymm3[4],ymm26[4],ymm3[5],ymm26[5],ymm3[6],ymm26[6],ymm3[7],ymm26[7],ymm3[16],ymm26[16],ymm3[17],ymm26[17],ymm3[18],ymm26[18],ymm3[19],ymm26[19],ymm3[20],ymm26[20],ymm3[21],ymm26[21],ymm3[22],ymm26[22],ymm3[23],ymm26[23]
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm28[8],ymm5[8],ymm28[9],ymm5[9],ymm28[10],ymm5[10],ymm28[11],ymm5[11],ymm28[12],ymm5[12],ymm28[13],ymm5[13],ymm28[14],ymm5[14],ymm28[15],ymm5[15],ymm28[24],ymm5[24],ymm28[25],ymm5[25],ymm28[26],ymm5[26],ymm28[27],ymm5[27],ymm28[28],ymm5[28],ymm28[29],ymm5[29],ymm28[30],ymm5[30],ymm28[31],ymm5[31]
-; AVX512BW-FCP-NEXT:    vpermw %ymm5, %ymm4, %ymm4
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm1, %zmm3 {%k1}
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm17, %zmm24, %zmm1
-; AVX512BW-FCP-NEXT:    vpshufb %zmm22, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm1, %zmm3 {%k2}
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm21, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm3 {%k3}
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm9, %zmm8 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm12, %zmm8 {%k1}
-; AVX512BW-FCP-NEXT:    movabsq $585610922974906400, %rax # imm = 0x820820820820820
-; AVX512BW-FCP-NEXT:    kmovq %rax, %k3
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm15, %zmm8 {%k3}
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm14, %zmm13 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm16, %zmm13 {%k1}
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm19, %zmm13 {%k3}
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm25, %zmm23 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm4 {%k3}
+; AVX512BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm5, %ymm0
+; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm6, %ymm3
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15],ymm6[24],ymm5[24],ymm6[25],ymm5[25],ymm6[26],ymm5[26],ymm6[27],ymm5[27],ymm6[28],ymm5[28],ymm6[29],ymm5[29],ymm6[30],ymm5[30],ymm6[31],ymm5[31]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm5 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-FCP-NEXT:    vpermw %ymm3, %ymm5, %ymm3
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm7, %ymm3
+; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm30, %ymm2
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm30[8],ymm7[8],ymm30[9],ymm7[9],ymm30[10],ymm7[10],ymm30[11],ymm7[11],ymm30[12],ymm7[12],ymm30[13],ymm7[13],ymm30[14],ymm7[14],ymm30[15],ymm7[15],ymm30[24],ymm7[24],ymm30[25],ymm7[25],ymm30[26],ymm7[26],ymm30[27],ymm7[27],ymm30[28],ymm7[28],ymm30[29],ymm7[29],ymm30[30],ymm7[30],ymm30[31],ymm7[31]
+; AVX512BW-FCP-NEXT:    vpermw %ymm3, %ymm24, %ymm3
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm0, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT:    vpshufb %zmm1, %zmm8, %zmm0
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm0, %zmm2 {%k2}
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k3}
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm22, %zmm21 {%k1}
 ; AVX512BW-FCP-NEXT:    movl $1227133513, %eax # imm = 0x49249249
-; AVX512BW-FCP-NEXT:    kmovd %eax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm11, %zmm23 {%k2}
+; AVX512BW-FCP-NEXT:    kmovd %eax, %k3
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm20, %zmm21 {%k3}
 ; AVX512BW-FCP-NEXT:    movabsq $2342443691899625602, %rax # imm = 0x2082082082082082
-; AVX512BW-FCP-NEXT:    kmovq %rax, %k3
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm7, %zmm23 {%k3}
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm10, %zmm6 {%k1}
-; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm18, %zmm6 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm20, %zmm6 {%k3}
+; AVX512BW-FCP-NEXT:    kmovq %rax, %k4
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm15, %zmm21 {%k4}
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm13, %zmm12 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm14, %zmm12 {%k1}
+; AVX512BW-FCP-NEXT:    movabsq $585610922974906400, %rax # imm = 0x820820820820820
+; AVX512BW-FCP-NEXT:    kmovq %rax, %k5
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm23, %zmm12 {%k5}
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm17, %zmm18 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm25, %zmm18 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm27, %zmm18 {%k5}
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm16, %zmm10 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqu16 %zmm11, %zmm10 {%k3}
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm19, %zmm10 {%k4}
 ; AVX512BW-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, 256(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm23, 64(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm13, (%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, 320(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, 256(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, (%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm12, 192(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm21, 64(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512BW-FCP-NEXT:    vzeroupper
 ; AVX512BW-FCP-NEXT:    retq
 ;
 ; AVX512DQ-BW-LABEL: store_i8_stride6_vf64:
 ; AVX512DQ-BW:       # %bb.0:
 ; AVX512DQ-BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %zmm7
+; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %zmm5
 ; AVX512DQ-BW-NEXT:    vmovdqa64 (%r9), %zmm6
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512DQ-BW-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm5, %ymm1, %ymm0
+; AVX512DQ-BW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-BW-NEXT:    vpshufb %ymm5, %ymm2, %ymm3
+; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm2, %ymm3
 ; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
 ; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
@@ -6753,59 +6745,57 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-BW-NEXT:    vpermw %ymm3, %ymm8, %ymm3
 ; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm9
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX512DQ-BW-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm11, %ymm3, %ymm0
+; AVX512DQ-BW-NEXT:    vpbroadcastq {{.*#+}} ymm12 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm12, %ymm3, %ymm0
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%rdx), %ymm4
-; AVX512DQ-BW-NEXT:    vpshufb %ymm11, %ymm4, %ymm10
+; AVX512DQ-BW-NEXT:    vpshufb %ymm12, %ymm4, %ymm10
 ; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm10[0],ymm0[0],ymm10[1],ymm0[1],ymm10[2],ymm0[2],ymm10[3],ymm0[3],ymm10[4],ymm0[4],ymm10[5],ymm0[5],ymm10[6],ymm0[6],ymm10[7],ymm0[7],ymm10[16],ymm0[16],ymm10[17],ymm0[17],ymm10[18],ymm0[18],ymm10[19],ymm0[19],ymm10[20],ymm0[20],ymm10[21],ymm0[21],ymm10[22],ymm0[22],ymm10[23],ymm0[23]
 ; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15],ymm4[24],ymm3[24],ymm4[25],ymm3[25],ymm4[26],ymm3[26],ymm4[27],ymm3[27],ymm4[28],ymm3[28],ymm4[29],ymm3[29],ymm4[30],ymm3[30],ymm4[31],ymm3[31]
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-NEXT:    vpermw %ymm10, %ymm12, %ymm10
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} ymm11 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-NEXT:    vpermw %ymm10, %ymm11, %ymm10
 ; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm0
 ; AVX512DQ-BW-NEXT:    movl $613566756, %r10d # imm = 0x24924924
 ; AVX512DQ-BW-NEXT:    kmovd %r10d, %k1
 ; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm9, %zmm0 {%k1}
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm7[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[4,5,6,7,4,5,6,7]
 ; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm14 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,10,13,12,11,14,0,0,15,10,13,12,11,14,0,0,15]
-; AVX512DQ-BW-NEXT:    vpshufb %zmm14, %zmm9, %zmm9
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-NEXT:    vpshufb %zmm14, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[2,2,2,3,6,6,6,7]
 ; AVX512DQ-BW-NEXT:    movl $-1840700270, %r10d # imm = 0x92492492
 ; AVX512DQ-BW-NEXT:    kmovd %r10d, %k2
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm9, %zmm0 {%k2}
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm6[4,5,6,7,4,5,6,7]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm9 = zmm9[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm5, %zmm0 {%k2}
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm5 = zmm5[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[2,2,2,3,6,6,6,7]
 ; AVX512DQ-BW-NEXT:    movabsq $-9076969306111049208, %r10 # imm = 0x8208208208208208
 ; AVX512DQ-BW-NEXT:    kmovq %r10, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm9, %zmm0 {%k3}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm5, %zmm0 {%k3}
 ; AVX512DQ-BW-NEXT:    vmovdqa (%rsi), %ymm9
-; AVX512DQ-BW-NEXT:    vpshufb %ymm5, %ymm9, %ymm13
+; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm9, %ymm5
 ; AVX512DQ-BW-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX512DQ-BW-NEXT:    vpshufb %ymm5, %ymm10, %ymm5
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[2],ymm13[2],ymm5[3],ymm13[3],ymm5[4],ymm13[4],ymm5[5],ymm13[5],ymm5[6],ymm13[6],ymm5[7],ymm13[7],ymm5[16],ymm13[16],ymm5[17],ymm13[17],ymm5[18],ymm13[18],ymm5[19],ymm13[19],ymm5[20],ymm13[20],ymm5[21],ymm13[21],ymm5[22],ymm13[22],ymm5[23],ymm13[23]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm10, %ymm6
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[16],ymm5[16],ymm6[17],ymm5[17],ymm6[18],ymm5[18],ymm6[19],ymm5[19],ymm6[20],ymm5[20],ymm6[21],ymm5[21],ymm6[22],ymm5[22],ymm6[23],ymm5[23]
 ; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} ymm13 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
-; AVX512DQ-BW-NEXT:    vpermw %ymm13, %ymm8, %ymm8
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm8
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%rcx), %ymm16
-; AVX512DQ-BW-NEXT:    vpshufb %ymm11, %ymm16, %ymm5
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdx), %ymm17
-; AVX512DQ-BW-NEXT:    vpshufb %ymm11, %ymm17, %ymm13
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm13[0],ymm5[0],ymm13[1],ymm5[1],ymm13[2],ymm5[2],ymm13[3],ymm5[3],ymm13[4],ymm5[4],ymm13[5],ymm5[5],ymm13[6],ymm5[6],ymm13[7],ymm5[7],ymm13[16],ymm5[16],ymm13[17],ymm5[17],ymm13[18],ymm5[18],ymm13[19],ymm5[19],ymm13[20],ymm5[20],ymm13[21],ymm5[21],ymm13[22],ymm5[22],ymm13[23],ymm5[23]
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15],ymm10[24],ymm9[24],ymm10[25],ymm9[25],ymm10[26],ymm9[26],ymm10[27],ymm9[27],ymm10[28],ymm9[28],ymm10[29],ymm9[29],ymm10[30],ymm9[30],ymm10[31],ymm9[31]
+; AVX512DQ-BW-NEXT:    vpermw %ymm6, %ymm8, %ymm6
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm6
+; AVX512DQ-BW-NEXT:    vmovdqa64 (%rcx), %ymm17
+; AVX512DQ-BW-NEXT:    vpshufb %ymm12, %ymm17, %ymm5
+; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdx), %ymm19
+; AVX512DQ-BW-NEXT:    vpshufb %ymm12, %ymm19, %ymm7
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[4],ymm5[4],ymm7[5],ymm5[5],ymm7[6],ymm5[6],ymm7[7],ymm5[7],ymm7[16],ymm5[16],ymm7[17],ymm5[17],ymm7[18],ymm5[18],ymm7[19],ymm5[19],ymm7[20],ymm5[20],ymm7[21],ymm5[21],ymm7[22],ymm5[22],ymm7[23],ymm5[23]
 ; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} ymm13 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
-; AVX512DQ-BW-NEXT:    vpermw %ymm13, %ymm12, %ymm12
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm5
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm8, %zmm5 {%k1}
-; AVX512DQ-BW-NEXT:    vmovdqa (%r8), %ymm13
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm13, %zmm7, %zmm7
-; AVX512DQ-BW-NEXT:    vpshufb %zmm14, %zmm7, %zmm7
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[2,2,2,3,6,6,6,7]
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm7, %zmm5 {%k2}
-; AVX512DQ-BW-NEXT:    vmovdqa (%r9), %ymm12
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm12, %zmm6, %zmm6
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm19[8],ymm17[8],ymm19[9],ymm17[9],ymm19[10],ymm17[10],ymm19[11],ymm17[11],ymm19[12],ymm17[12],ymm19[13],ymm17[13],ymm19[14],ymm17[14],ymm19[15],ymm17[15],ymm19[24],ymm17[24],ymm19[25],ymm17[25],ymm19[26],ymm17[26],ymm19[27],ymm17[27],ymm19[28],ymm17[28],ymm19[29],ymm17[29],ymm19[30],ymm17[30],ymm19[31],ymm17[31]
+; AVX512DQ-BW-NEXT:    vpermw %ymm7, %ymm11, %ymm7
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm6, %zmm5 {%k1}
+; AVX512DQ-BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-NEXT:    vpshufb %zmm14, %zmm13, %zmm6
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm6, %zmm5 {%k2}
+; AVX512DQ-BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm6 = zmm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
 ; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
 ; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm6, %zmm5 {%k3}
 ; AVX512DQ-BW-NEXT:    vmovdqa64 (%rsi), %xmm21
@@ -6817,101 +6807,99 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-BW-NEXT:    vpshufb %xmm20, %xmm8, %xmm14
 ; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm14 = xmm14[8],xmm6[8],xmm14[9],xmm6[9],xmm14[10],xmm6[10],xmm14[11],xmm6[11],xmm14[12],xmm6[12],xmm14[13],xmm6[13],xmm14[14],xmm6[14],xmm14[15],xmm6[15]
 ; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm27 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm14, %zmm27, %zmm6
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%rcx), %xmm25
-; AVX512DQ-BW-NEXT:    vmovdqa 32(%rcx), %xmm14
-; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm14, %xmm15
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdx), %xmm26
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm25 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm14, %zmm25, %zmm6
+; AVX512DQ-BW-NEXT:    vmovdqa64 (%rcx), %xmm23
+; AVX512DQ-BW-NEXT:    vmovdqa 32(%rcx), %xmm15
+; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm15, %xmm14
+; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdx), %xmm24
 ; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rdx), %xmm18
-; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm18, %xmm19
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm19[0],xmm15[0],xmm19[1],xmm15[1],xmm19[2],xmm15[2],xmm19[3],xmm15[3],xmm19[4],xmm15[4],xmm19[5],xmm15[5],xmm19[6],xmm15[6],xmm19[7],xmm15[7]
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm18[0],xmm14[0],xmm18[1],xmm14[1],xmm18[2],xmm14[2],xmm18[3],xmm14[3],xmm18[4],xmm14[4],xmm18[5],xmm14[5],xmm18[6],xmm14[6],xmm18[7],xmm14[7]
-; AVX512DQ-BW-NEXT:    vprold $16, %xmm19, %xmm19
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm15, %zmm19, %zmm15
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm15 = zmm15[0,0,0,1,4,4,4,5]
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm15, %zmm6 {%k2}
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %xmm23
-; AVX512DQ-BW-NEXT:    vmovdqa 32(%r8), %xmm15
-; AVX512DQ-BW-NEXT:    vpmovzxbw {{.*#+}} xmm19 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero,xmm15[4],zero,xmm15[5],zero,xmm15[6],zero,xmm15[7],zero
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} xmm24 = xmm15[2,1,2,3]
-; AVX512DQ-BW-NEXT:    vpmovzxbw {{.*#+}} xmm24 = xmm24[0],zero,xmm24[1],zero,xmm24[2],zero,xmm24[3],zero,xmm24[4],zero,xmm24[5],zero,xmm24[6],zero,xmm24[7],zero
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm28 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm24, %zmm28, %zmm19
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm19, %zmm6 {%k1}
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%r9), %xmm24
-; AVX512DQ-BW-NEXT:    vmovdqa64 32(%r9), %xmm19
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm29 = xmm19[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} xmm30 = xmm19[2,1,2,3]
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm30 = xmm30[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm30, %zmm28, %zmm29
+; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm18, %xmm16
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm18[0],xmm15[0],xmm18[1],xmm15[1],xmm18[2],xmm15[2],xmm18[3],xmm15[3],xmm18[4],xmm15[4],xmm18[5],xmm15[5],xmm18[6],xmm15[6],xmm18[7],xmm15[7]
+; AVX512DQ-BW-NEXT:    vprold $16, %xmm16, %xmm16
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm14, %zmm16, %zmm14
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm14 = zmm14[0,0,0,1,4,4,4,5]
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm14, %zmm6 {%k2}
+; AVX512DQ-BW-NEXT:    vmovdqa 32(%r8), %xmm14
+; AVX512DQ-BW-NEXT:    vpmovzxbw {{.*#+}} xmm16 = xmm14[0],zero,xmm14[1],zero,xmm14[2],zero,xmm14[3],zero,xmm14[4],zero,xmm14[5],zero,xmm14[6],zero,xmm14[7],zero
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} xmm26 = xmm14[2,1,2,3]
+; AVX512DQ-BW-NEXT:    vpmovzxbw {{.*#+}} xmm26 = xmm26[0],zero,xmm26[1],zero,xmm26[2],zero,xmm26[3],zero,xmm26[4],zero,xmm26[5],zero,xmm26[6],zero,xmm26[7],zero
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm27 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm26, %zmm27, %zmm16
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm16, %zmm6 {%k1}
+; AVX512DQ-BW-NEXT:    vmovdqa64 32(%r9), %xmm16
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm26 = xmm16[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} xmm28 = xmm16[2,1,2,3]
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm28 = xmm28[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm28, %zmm27, %zmm26
 ; AVX512DQ-BW-NEXT:    movabsq $585610922974906400, %rcx # imm = 0x820820820820820
 ; AVX512DQ-BW-NEXT:    kmovq %rcx, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm29, %zmm6 {%k3}
-; AVX512DQ-BW-NEXT:    vpshufb %xmm20, %xmm21, %xmm29
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm26, %zmm6 {%k3}
+; AVX512DQ-BW-NEXT:    vpshufb %xmm20, %xmm21, %xmm26
 ; AVX512DQ-BW-NEXT:    vpshufb %xmm20, %xmm22, %xmm20
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm29 = xmm20[8],xmm29[8],xmm20[9],xmm29[9],xmm20[10],xmm29[10],xmm20[11],xmm29[11],xmm20[12],xmm29[12],xmm20[13],xmm29[13],xmm20[14],xmm29[14],xmm20[15],xmm29[15]
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm26 = xmm20[8],xmm26[8],xmm20[9],xmm26[9],xmm20[10],xmm26[10],xmm20[11],xmm26[11],xmm20[12],xmm26[12],xmm20[13],xmm26[13],xmm20[14],xmm26[14],xmm20[15],xmm26[15]
 ; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm20 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm29, %zmm27, %zmm20
-; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm25, %xmm27
-; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm26, %xmm11
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm11[0],xmm27[0],xmm11[1],xmm27[1],xmm11[2],xmm27[2],xmm11[3],xmm27[3],xmm11[4],xmm27[4],xmm11[5],xmm27[5],xmm11[6],xmm27[6],xmm11[7],xmm27[7]
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm27 = xmm26[0],xmm25[0],xmm26[1],xmm25[1],xmm26[2],xmm25[2],xmm26[3],xmm25[3],xmm26[4],xmm25[4],xmm26[5],xmm25[5],xmm26[6],xmm25[6],xmm26[7],xmm25[7]
-; AVX512DQ-BW-NEXT:    vprold $16, %xmm27, %xmm27
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm11, %zmm27, %zmm11
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[0,0,0,1,4,4,4,5]
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm11, %zmm20 {%k2}
-; AVX512DQ-BW-NEXT:    vpmovzxbw {{.*#+}} xmm11 = xmm23[0],zero,xmm23[1],zero,xmm23[2],zero,xmm23[3],zero,xmm23[4],zero,xmm23[5],zero,xmm23[6],zero,xmm23[7],zero
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} xmm27 = xmm23[2,1,2,3]
-; AVX512DQ-BW-NEXT:    vpmovzxbw {{.*#+}} xmm27 = xmm27[0],zero,xmm27[1],zero,xmm27[2],zero,xmm27[3],zero,xmm27[4],zero,xmm27[5],zero,xmm27[6],zero,xmm27[7],zero
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm27, %zmm28, %zmm11
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm11, %zmm20 {%k1}
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm24[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} xmm27 = xmm24[2,1,2,3]
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm27 = xmm27[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm27, %zmm28, %zmm11
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm11, %zmm20 {%k3}
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm17[0],ymm16[0],ymm17[1],ymm16[1],ymm17[2],ymm16[2],ymm17[3],ymm16[3],ymm17[4],ymm16[4],ymm17[5],ymm16[5],ymm17[6],ymm16[6],ymm17[7],ymm16[7],ymm17[16],ymm16[16],ymm17[17],ymm16[17],ymm17[18],ymm16[18],ymm17[19],ymm16[19],ymm17[20],ymm16[20],ymm17[21],ymm16[21],ymm17[22],ymm16[22],ymm17[23],ymm16[23]
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm17 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm11, %zmm17, %zmm16
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm26, %zmm25, %zmm20
+; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm23, %xmm25
+; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm24, %xmm12
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm25[0],xmm12[1],xmm25[1],xmm12[2],xmm25[2],xmm12[3],xmm25[3],xmm12[4],xmm25[4],xmm12[5],xmm25[5],xmm12[6],xmm25[6],xmm12[7],xmm25[7]
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm24[0],xmm23[0],xmm24[1],xmm23[1],xmm24[2],xmm23[2],xmm24[3],xmm23[3],xmm24[4],xmm23[4],xmm24[5],xmm23[5],xmm24[6],xmm23[6],xmm24[7],xmm23[7]
+; AVX512DQ-BW-NEXT:    vprold $16, %xmm25, %xmm25
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm12, %zmm25, %zmm12
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[0,0,0,1,4,4,4,5]
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm12, %zmm20 {%k2}
+; AVX512DQ-BW-NEXT:    vpmovzxbw {{.*#+}} xmm12 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} xmm25 = xmm13[2,1,2,3]
+; AVX512DQ-BW-NEXT:    vpmovzxbw {{.*#+}} xmm25 = xmm25[0],zero,xmm25[1],zero,xmm25[2],zero,xmm25[3],zero,xmm25[4],zero,xmm25[5],zero,xmm25[6],zero,xmm25[7],zero
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm25, %zmm27, %zmm12
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm12, %zmm20 {%k1}
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm11[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} xmm25 = xmm11[2,1,2,3]
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm25[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm25, %zmm27, %zmm12
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm12, %zmm20 {%k3}
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm19[0],ymm17[0],ymm19[1],ymm17[1],ymm19[2],ymm17[2],ymm19[3],ymm17[3],ymm19[4],ymm17[4],ymm19[5],ymm17[5],ymm19[6],ymm17[6],ymm19[7],ymm17[7],ymm19[16],ymm17[16],ymm19[17],ymm17[17],ymm19[18],ymm17[18],ymm19[19],ymm17[19],ymm19[20],ymm17[20],ymm19[21],ymm17[21],ymm19[22],ymm17[22],ymm19[23],ymm17[23]
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm17 = xmm24[8],xmm23[8],xmm24[9],xmm23[9],xmm24[10],xmm23[10],xmm24[11],xmm23[11],xmm24[12],xmm23[12],xmm24[13],xmm23[13],xmm24[14],xmm23[14],xmm24[15],xmm23[15]
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm19 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm12, %zmm19, %zmm17
 ; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
 ; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm9, %zmm11, %zmm10
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm16, %zmm10 {%k1}
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm12 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm9, %zmm12, %zmm10
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm17, %zmm10 {%k1}
 ; AVX512DQ-BW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
 ; AVX512DQ-BW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm9, %ymm13, %ymm13
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm23[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm9, %ymm13, %ymm17
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm21 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm13, %zmm21, %zmm16
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm17, %zmm21, %zmm13
 ; AVX512DQ-BW-NEXT:    movl $1227133513, %ecx # imm = 0x49249249
 ; AVX512DQ-BW-NEXT:    kmovd %ecx, %k2
-; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm16, %zmm10 {%k2}
+; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm13, %zmm10 {%k2}
 ; AVX512DQ-BW-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
 ; AVX512DQ-BW-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm13, %ymm12, %ymm12
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm24[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm12, %zmm21, %zmm16
+; AVX512DQ-BW-NEXT:    vpshufb %ymm13, %ymm11, %ymm17
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm17, %zmm21, %zmm11
 ; AVX512DQ-BW-NEXT:    movabsq $2342443691899625602, %rcx # imm = 0x2082082082082082
 ; AVX512DQ-BW-NEXT:    kmovq %rcx, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm16, %zmm10 {%k3}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm11, %zmm10 {%k3}
 ; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm18[8],xmm14[8],xmm18[9],xmm14[9],xmm18[10],xmm14[10],xmm18[11],xmm14[11],xmm18[12],xmm14[12],xmm18[13],xmm14[13],xmm18[14],xmm14[14],xmm18[15],xmm14[15]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm3, %zmm17, %zmm4
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm18[8],xmm15[8],xmm18[9],xmm15[9],xmm18[10],xmm15[10],xmm18[11],xmm15[11],xmm18[12],xmm15[12],xmm18[13],xmm15[13],xmm18[14],xmm15[14],xmm18[15],xmm15[15]
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm3, %zmm19, %zmm4
 ; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
 ; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
-; AVX512DQ-BW-NEXT:    vpermt2w %zmm1, %zmm11, %zmm2
+; AVX512DQ-BW-NEXT:    vpermt2w %zmm1, %zmm12, %zmm2
 ; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm4, %zmm2 {%k1}
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%r8), %ymm1
 ; AVX512DQ-BW-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm14[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512DQ-BW-NEXT:    vpermt2w %zmm1, %zmm21, %zmm3
 ; AVX512DQ-BW-NEXT:    vmovdqu16 %zmm3, %zmm2 {%k2}
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%r9), %ymm1
 ; AVX512DQ-BW-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm19[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm16[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX512DQ-BW-NEXT:    vpermt2w %zmm1, %zmm21, %zmm3
 ; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k3}
 ; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, 256(%rax)
@@ -6925,185 +6913,181 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512DQ-BW-FCP-LABEL: store_i8_stride6_vf64:
 ; AVX512DQ-BW-FCP:       # %bb.0:
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rsi), %ymm6
-; AVX512DQ-BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm6, %ymm0
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdi), %ymm7
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm7, %ymm2
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm7[8],ymm6[8],ymm7[9],ymm6[9],ymm7[10],ymm6[10],ymm7[11],ymm6[11],ymm7[12],ymm6[12],ymm7[13],ymm6[13],ymm7[14],ymm6[14],ymm7[15],ymm6[15],ymm7[24],ymm6[24],ymm7[25],ymm6[25],ymm7[26],ymm6[26],ymm7[27],ymm6[27],ymm7[28],ymm6[28],ymm7[29],ymm6[29],ymm7[30],ymm6[30],ymm7[31],ymm6[31]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm1 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm3, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm5
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rcx), %ymm10
-; AVX512DQ-BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm10, %ymm0
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdx), %ymm11
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm11, %ymm4
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11],ymm11[12],ymm10[12],ymm11[13],ymm10[13],ymm11[14],ymm10[14],ymm11[15],ymm10[15],ymm11[24],ymm10[24],ymm11[25],ymm10[25],ymm11[26],ymm10[26],ymm11[27],ymm10[27],ymm11[28],ymm10[28],ymm11[29],ymm10[29],ymm11[30],ymm10[30],ymm11[31],ymm10[31]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm4 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm8, %ymm4, %ymm8
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
-; AVX512DQ-BW-FCP-NEXT:    movl $613566756, %eax # imm = 0x24924924
-; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm5, %zmm0 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rcx), %ymm5
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %xmm23
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rcx), %xmm17
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm3, %xmm17, %xmm8
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %xmm25
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rdx), %xmm21
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm3, %xmm21, %xmm9
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm21[0],xmm17[0],xmm21[1],xmm17[1],xmm21[2],xmm17[2],xmm21[3],xmm17[3],xmm21[4],xmm17[4],xmm21[5],xmm17[5],xmm21[6],xmm17[6],xmm21[7],xmm17[7]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm13 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm8, %zmm13, %zmm9
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %xmm30
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rsi), %xmm18
-; AVX512DQ-BW-FCP-NEXT:    vpbroadcastq {{.*#+}} xmm15 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm15, %xmm18, %xmm8
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %xmm31
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rdi), %xmm24
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm15, %xmm24, %xmm12
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm24[0],xmm18[0],xmm24[1],xmm18[1],xmm24[2],xmm18[2],xmm24[3],xmm18[3],xmm24[4],xmm18[4],xmm24[5],xmm18[5],xmm24[6],xmm18[6],xmm24[7],xmm18[7]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm16 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm12, %zmm16, %zmm8
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %xmm26
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%r8), %xmm20
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} xmm19 = [8,9,0,0,0,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm19, %xmm20, %xmm14
-; AVX512DQ-BW-FCP-NEXT:    vpmovzxbw {{.*#+}} xmm12 = xmm20[0],zero,xmm20[1],zero,xmm20[2],zero,xmm20[3],zero,xmm20[4],zero,xmm20[5],zero,xmm20[6],zero,xmm20[7],zero
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm28 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm14, %zmm28, %zmm12
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r9), %xmm27
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%r9), %xmm22
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm3, %xmm23, %xmm14
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm3, %xmm25, %xmm29
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm29 = xmm29[0],xmm14[0],xmm29[1],xmm14[1],xmm29[2],xmm14[2],xmm29[3],xmm14[3],xmm29[4],xmm14[4],xmm29[5],xmm14[5],xmm29[6],xmm14[6],xmm29[7],xmm14[7]
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm25[0],xmm23[0],xmm25[1],xmm23[1],xmm25[2],xmm23[2],xmm25[3],xmm23[3],xmm25[4],xmm23[4],xmm25[5],xmm23[5],xmm25[6],xmm23[6],xmm25[7],xmm23[7]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm29, %zmm13, %zmm14
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [u,8,u,9,u,10,u,11,u,4,u,5,u,6,u,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm15, %xmm30, %xmm13
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm15, %xmm31, %xmm15
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm31[0],xmm30[0],xmm31[1],xmm30[1],xmm31[2],xmm30[2],xmm31[3],xmm30[3],xmm31[4],xmm30[4],xmm31[5],xmm30[5],xmm31[6],xmm30[6],xmm31[7],xmm30[7]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm15, %zmm16, %zmm13
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm29, %xmm22, %xmm16
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm22[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm16, %zmm28, %zmm15
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm19, %xmm26, %xmm19
-; AVX512DQ-BW-FCP-NEXT:    vpmovzxbw {{.*#+}} xmm16 = xmm26[0],zero,xmm26[1],zero,xmm26[2],zero,xmm26[3],zero,xmm26[4],zero,xmm26[5],zero,xmm26[6],zero,xmm26[7],zero
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm19, %zmm28, %zmm16
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm29, %xmm27, %xmm29
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm27[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm29, %zmm28, %zmm19
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %ymm28
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm29 = ymm28[0],ymm5[0],ymm28[1],ymm5[1],ymm28[2],ymm5[2],ymm28[3],ymm5[3],ymm28[4],ymm5[4],ymm28[5],ymm5[5],ymm28[6],ymm5[6],ymm28[7],ymm5[7],ymm28[16],ymm5[16],ymm28[17],ymm5[17],ymm28[18],ymm5[18],ymm28[19],ymm5[19],ymm28[20],ymm5[20],ymm28[21],ymm5[21],ymm28[22],ymm5[22],ymm28[23],ymm5[23]
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm25 = xmm25[8],xmm23[8],xmm25[9],xmm23[9],xmm25[10],xmm23[10],xmm25[11],xmm23[11],xmm25[12],xmm23[12],xmm25[13],xmm23[13],xmm25[14],xmm23[14],xmm25[15],xmm23[15]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm29, %zmm1, %zmm25
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %ymm29
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm23 = xmm31[8],xmm30[8],xmm31[9],xmm30[9],xmm31[10],xmm30[10],xmm31[11],xmm30[11],xmm31[12],xmm30[12],xmm31[13],xmm30[13],xmm31[14],xmm30[14],xmm31[15],xmm30[15]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %ymm30
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm31 = ymm30[0],ymm29[0],ymm30[1],ymm29[1],ymm30[2],ymm29[2],ymm30[3],ymm29[3],ymm30[4],ymm29[4],ymm30[5],ymm29[5],ymm30[6],ymm29[6],ymm30[7],ymm29[7],ymm30[16],ymm29[16],ymm30[17],ymm29[17],ymm30[18],ymm29[18],ymm30[19],ymm29[19],ymm30[20],ymm29[20],ymm30[21],ymm29[21],ymm30[22],ymm29[22],ymm30[23],ymm29[23]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm31, %zmm2, %zmm23
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[4],ymm10[4],ymm11[5],ymm10[5],ymm11[6],ymm10[6],ymm11[7],ymm10[7],ymm11[16],ymm10[16],ymm11[17],ymm10[17],ymm11[18],ymm10[18],ymm11[19],ymm10[19],ymm11[20],ymm10[20],ymm11[21],ymm10[21],ymm11[22],ymm10[22],ymm11[23],ymm10[23]
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm21[8],xmm17[8],xmm21[9],xmm17[9],xmm21[10],xmm17[10],xmm21[11],xmm17[11],xmm21[12],xmm17[12],xmm21[13],xmm17[13],xmm21[14],xmm17[14],xmm21[15],xmm17[15]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %ymm17
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm11, %zmm1, %zmm10
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rsi), %ymm10
+; AVX512DQ-BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm0, %ymm10, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdi), %ymm11
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm0, %ymm11, %ymm2
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11],ymm11[12],ymm10[12],ymm11[13],ymm10[13],ymm11[14],ymm10[14],ymm11[15],ymm10[15],ymm11[24],ymm10[24],ymm11[25],ymm10[25],ymm11[26],ymm10[26],ymm11[27],ymm10[27],ymm11[28],ymm10[28],ymm11[29],ymm10[29],ymm11[30],ymm10[30],ymm11[31],ymm10[31]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm0 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm3, %ymm0, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm3
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rcx), %ymm4
+; AVX512DQ-BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rsi), %ymm5
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rdi), %ymm6
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rcx), %ymm7
+; AVX512DQ-BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %xmm21
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rcx), %xmm16
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %xmm22
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rdx), %xmm20
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %xmm24
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rsi), %xmm15
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %xmm26
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rdi), %xmm19
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm2, %xmm16, %xmm12
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm2, %xmm20, %xmm13
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm20[0],xmm16[0],xmm20[1],xmm16[1],xmm20[2],xmm16[2],xmm20[3],xmm16[3],xmm20[4],xmm16[4],xmm20[5],xmm16[5],xmm20[6],xmm16[6],xmm20[7],xmm16[7]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm18 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm12, %zmm18, %zmm13
+; AVX512DQ-BW-FCP-NEXT:    vpbroadcastq {{.*#+}} xmm23 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm23, %xmm15, %xmm12
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm23, %xmm19, %xmm14
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm14 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm19[0],xmm15[0],xmm19[1],xmm15[1],xmm19[2],xmm15[2],xmm19[3],xmm15[3],xmm19[4],xmm15[4],xmm19[5],xmm15[5],xmm19[6],xmm15[6],xmm19[7],xmm15[7]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm25 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5,32,33,34,35,32,33,34,35,32,33,34,35,36,37,38,39]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm14, %zmm25, %zmm12
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%r8), %xmm28
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} xmm27 = [8,9,0,0,0,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm27, %xmm28, %xmm17
+; AVX512DQ-BW-FCP-NEXT:    vpmovzxbw {{.*#+}} xmm14 = xmm28[0],zero,xmm28[1],zero,xmm28[2],zero,xmm28[3],zero,xmm28[4],zero,xmm28[5],zero,xmm28[6],zero,xmm28[7],zero
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm30 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm17, %zmm30, %zmm14
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%r9), %xmm29
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm2, %xmm21, %xmm17
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm2, %xmm22, %xmm31
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm31 = xmm31[0],xmm17[0],xmm31[1],xmm17[1],xmm31[2],xmm17[2],xmm31[3],xmm17[3],xmm31[4],xmm17[4],xmm31[5],xmm17[5],xmm31[6],xmm17[6],xmm31[7],xmm17[7]
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm31, %zmm18, %zmm17
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm31 = [u,8,u,9,u,10,u,11,u,4,u,5,u,6,u,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm23, %xmm24, %xmm18
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm23, %xmm26, %xmm23
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm23 = xmm23[8],xmm18[8],xmm23[9],xmm18[9],xmm23[10],xmm18[10],xmm23[11],xmm18[11],xmm23[12],xmm18[12],xmm23[13],xmm18[13],xmm23[14],xmm18[14],xmm23[15],xmm18[15]
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm18 = xmm26[0],xmm24[0],xmm26[1],xmm24[1],xmm26[2],xmm24[2],xmm26[3],xmm24[3],xmm26[4],xmm24[4],xmm26[5],xmm24[5],xmm26[6],xmm24[6],xmm26[7],xmm24[7]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm23, %zmm25, %zmm18
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm31, %xmm29, %xmm25
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm23 = xmm29[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm25, %zmm30, %zmm23
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm27, %xmm8, %xmm27
+; AVX512DQ-BW-FCP-NEXT:    vpmovzxbw {{.*#+}} xmm25 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm27, %zmm30, %zmm25
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm31, %xmm9, %xmm31
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm27 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm31, %zmm30, %zmm27
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %ymm30
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm31 = ymm30[0],ymm7[0],ymm30[1],ymm7[1],ymm30[2],ymm7[2],ymm30[3],ymm7[3],ymm30[4],ymm7[4],ymm30[5],ymm7[5],ymm30[6],ymm7[6],ymm30[7],ymm7[7],ymm30[16],ymm7[16],ymm30[17],ymm7[17],ymm30[18],ymm7[18],ymm30[19],ymm7[19],ymm30[20],ymm7[20],ymm30[21],ymm7[21],ymm30[22],ymm7[22],ymm30[23],ymm7[23]
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm22 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm31, %zmm0, %zmm22
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm31 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[16],ymm5[16],ymm6[17],ymm5[17],ymm6[18],ymm5[18],ymm6[19],ymm5[19],ymm6[20],ymm5[20],ymm6[21],ymm5[21],ymm6[22],ymm5[22],ymm6[23],ymm5[23]
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm21 = xmm26[8],xmm24[8],xmm26[9],xmm24[9],xmm26[10],xmm24[10],xmm26[11],xmm24[11],xmm26[12],xmm24[12],xmm26[13],xmm24[13],xmm26[14],xmm24[14],xmm26[15],xmm24[15]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm24 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm31, %zmm24, %zmm21
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rdx), %ymm26
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm20[8],xmm16[8],xmm20[9],xmm16[9],xmm20[10],xmm16[10],xmm20[11],xmm16[11],xmm20[12],xmm16[12],xmm20[13],xmm16[13],xmm20[14],xmm16[14],xmm20[15],xmm16[15]
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm26[0],ymm4[0],ymm26[1],ymm4[1],ymm26[2],ymm4[2],ymm26[3],ymm4[3],ymm26[4],ymm4[4],ymm26[5],ymm4[5],ymm26[6],ymm4[6],ymm26[7],ymm4[7],ymm26[16],ymm4[16],ymm26[17],ymm4[17],ymm26[18],ymm4[18],ymm26[19],ymm4[19],ymm26[20],ymm4[20],ymm26[21],ymm4[21],ymm26[22],ymm4[22],ymm26[23],ymm4[23]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm20, %zmm0, %zmm16
 ; AVX512DQ-BW-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
 ; AVX512DQ-BW-FCP-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm17, %ymm21
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm26[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm26 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm21, %zmm26, %zmm11
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[4],ymm6[4],ymm7[5],ymm6[5],ymm7[6],ymm6[6],ymm7[7],ymm6[7],ymm7[16],ymm6[16],ymm7[17],ymm6[17],ymm7[18],ymm6[18],ymm7[19],ymm6[19],ymm7[20],ymm6[20],ymm7[21],ymm6[21],ymm7[22],ymm6[22],ymm7[23],ymm6[23]
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm24[8],xmm18[8],xmm24[9],xmm18[9],xmm24[10],xmm18[10],xmm24[11],xmm18[11],xmm24[12],xmm18[12],xmm24[13],xmm18[13],xmm24[14],xmm18[14],xmm24[15],xmm18[15]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r9), %ymm21
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm7, %zmm2, %zmm6
-; AVX512DQ-BW-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
-; AVX512DQ-BW-FCP-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm21, %ymm18
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm27[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm18, %zmm26, %zmm7
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%r8), %ymm18
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm18, %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm24
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm18 = xmm20[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm1, %zmm26, %zmm18
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm8, %ymm31
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,40,41,42,43,40,41,42,43,40,41,42,43,44,45,46,47]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm31, %zmm0, %zmm20
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[4],ymm10[4],ymm11[5],ymm10[5],ymm11[6],ymm10[6],ymm11[7],ymm10[7],ymm11[16],ymm10[16],ymm11[17],ymm10[17],ymm11[18],ymm10[18],ymm11[19],ymm10[19],ymm11[20],ymm10[20],ymm11[21],ymm10[21],ymm11[22],ymm10[22],ymm11[23],ymm10[23]
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm19[8],xmm15[8],xmm19[9],xmm15[9],xmm19[10],xmm15[10],xmm19[11],xmm15[11],xmm19[12],xmm15[12],xmm19[13],xmm15[13],xmm19[14],xmm15[14],xmm19[15],xmm15[15]
+; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
+; AVX512DQ-BW-FCP-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm11, %zmm24, %zmm10
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm19, %ymm9, %ymm11
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm11, %zmm0, %zmm15
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%r8), %ymm11
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm11, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm28[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm1, %zmm0, %zmm11
 ; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%r9), %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r9), %zmm2
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm22[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm1, %zmm26, %zmm20
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm24[4,5,6,7,4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm22 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,10,13,12,11,14,0,0,15,10,13,12,11,14,0,0,15]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %zmm22, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm19, %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm19 = xmm29[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512DQ-BW-FCP-NEXT:    vpermt2w %zmm1, %zmm0, %zmm19
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm4, %ymm0
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm26, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm26[8],ymm4[8],ymm26[9],ymm4[9],ymm26[10],ymm4[10],ymm26[11],ymm4[11],ymm26[12],ymm4[12],ymm26[13],ymm4[13],ymm26[14],ymm4[14],ymm26[15],ymm4[15],ymm26[24],ymm4[24],ymm26[25],ymm4[25],ymm26[26],ymm4[26],ymm26[27],ymm4[27],ymm26[28],ymm4[28],ymm26[29],ymm4[29],ymm26[30],ymm4[30],ymm26[31],ymm4[31]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm24 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm1, %ymm24, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm4
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
+; AVX512DQ-BW-FCP-NEXT:    movl $613566756, %eax # imm = 0x24924924
+; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm3, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,10,13,12,11,14,0,0,15,10,13,12,11,14,0,0,15]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
 ; AVX512DQ-BW-FCP-NEXT:    movl $-1840700270, %eax # imm = 0x92492492
 ; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[4,5,6,7,4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm0, %zmm4 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r9), %zmm0
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,6,7,4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,42,u,45,u,44,u,43,u,46,u,u,u,u,u,47,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $-9076969306111049208, %rax # imm = 0x8208208208208208
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k3}
-; AVX512DQ-BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm26 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm26, %ymm29, %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm26, %ymm30, %ymm26
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm26[0],ymm1[0],ymm26[1],ymm1[1],ymm26[2],ymm1[2],ymm26[3],ymm1[3],ymm26[4],ymm1[4],ymm26[5],ymm1[5],ymm26[6],ymm1[6],ymm26[7],ymm1[7],ymm26[16],ymm1[16],ymm26[17],ymm1[17],ymm26[18],ymm1[18],ymm26[19],ymm1[19],ymm26[20],ymm1[20],ymm26[21],ymm1[21],ymm26[22],ymm1[22],ymm26[23],ymm1[23]
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm26 = ymm30[8],ymm29[8],ymm30[9],ymm29[9],ymm30[10],ymm29[10],ymm30[11],ymm29[11],ymm30[12],ymm29[12],ymm30[13],ymm29[13],ymm30[14],ymm29[14],ymm30[15],ymm29[15],ymm30[24],ymm29[24],ymm30[25],ymm29[25],ymm30[26],ymm29[26],ymm30[27],ymm29[27],ymm30[28],ymm29[28],ymm30[29],ymm29[29],ymm30[30],ymm29[30],ymm30[31],ymm29[31]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm27 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm26, %ymm27, %ymm26
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm26, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm5, %ymm26
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm28, %ymm3
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm26[0],ymm3[1],ymm26[1],ymm3[2],ymm26[2],ymm3[3],ymm26[3],ymm3[4],ymm26[4],ymm3[5],ymm26[5],ymm3[6],ymm26[6],ymm3[7],ymm26[7],ymm3[16],ymm26[16],ymm3[17],ymm26[17],ymm3[18],ymm26[18],ymm3[19],ymm26[19],ymm3[20],ymm26[20],ymm3[21],ymm26[21],ymm3[22],ymm26[22],ymm3[23],ymm26[23]
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm28[8],ymm5[8],ymm28[9],ymm5[9],ymm28[10],ymm5[10],ymm28[11],ymm5[11],ymm28[12],ymm5[12],ymm28[13],ymm5[13],ymm28[14],ymm5[14],ymm28[15],ymm5[15],ymm28[24],ymm5[24],ymm28[25],ymm5[25],ymm28[26],ymm5[26],ymm28[27],ymm5[27],ymm28[28],ymm5[28],ymm28[29],ymm5[29],ymm28[30],ymm5[30],ymm28[31],ymm5[31]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm5, %ymm4, %ymm4
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm1, %zmm3 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm17, %zmm24, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %zmm22, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm1, %zmm3 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm21, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,2,2,3,6,6,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm3 {%k3}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm9, %zmm8 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm12, %zmm8 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    movabsq $585610922974906400, %rax # imm = 0x820820820820820
-; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm15, %zmm8 {%k3}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm14, %zmm13 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm16, %zmm13 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm19, %zmm13 {%k3}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm25, %zmm23 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm4 {%k3}
+; AVX512DQ-BW-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm5, %ymm0
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm6, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15],ymm6[24],ymm5[24],ymm6[25],ymm5[25],ymm6[26],ymm5[26],ymm6[27],ymm5[27],ymm6[28],ymm5[28],ymm6[29],ymm5[29],ymm6[30],ymm5[30],ymm6[31],ymm5[31]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm5 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm3, %ymm5, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm7, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm30, %ymm2
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm30[8],ymm7[8],ymm30[9],ymm7[9],ymm30[10],ymm7[10],ymm30[11],ymm7[11],ymm30[12],ymm7[12],ymm30[13],ymm7[13],ymm30[14],ymm7[14],ymm30[15],ymm7[15],ymm30[24],ymm7[24],ymm30[25],ymm7[25],ymm30[26],ymm7[26],ymm30[27],ymm7[27],ymm30[28],ymm7[28],ymm30[29],ymm7[29],ymm30[30],ymm7[30],ymm30[31],ymm7[31]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm3, %ymm24, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm0, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %zmm1, %zmm8, %zmm0
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm0, %zmm2 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zmm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,u,21,u,24,u,23,u,u,u,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,58,u,61,u,60,u,59,u,62,u,u,u,u,u,63]
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k3}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm22, %zmm21 {%k1}
 ; AVX512DQ-BW-FCP-NEXT:    movl $1227133513, %eax # imm = 0x49249249
-; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm11, %zmm23 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k3
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm20, %zmm21 {%k3}
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $2342443691899625602, %rax # imm = 0x2082082082082082
-; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm7, %zmm23 {%k3}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm10, %zmm6 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm18, %zmm6 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm20, %zmm6 {%k3}
+; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k4
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm15, %zmm21 {%k4}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm13, %zmm12 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm14, %zmm12 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    movabsq $585610922974906400, %rax # imm = 0x820820820820820
+; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k5
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm23, %zmm12 {%k5}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm17, %zmm18 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm25, %zmm18 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm27, %zmm18 {%k5}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm16, %zmm10 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu16 %zmm11, %zmm10 {%k3}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm19, %zmm10 {%k4}
 ; AVX512DQ-BW-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, 256(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm23, 64(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm13, (%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, 320(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, 256(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, (%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm12, 192(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm21, 64(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm4, 320(%rax)
 ; AVX512DQ-BW-FCP-NEXT:    vzeroupper
 ; AVX512DQ-BW-FCP-NEXT:    retq
   %in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 9aa3330c57778..f4055a953badd 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -9852,1130 +9852,1096 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-LABEL: store_i8_stride7_vf64:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 (%rax), %zmm3
+; AVX512BW-NEXT:    vmovdqa64 (%rax), %zmm14
 ; AVX512BW-NEXT:    vmovdqa64 32(%rdx), %ymm16
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-NEXT:    vpshufb %ymm0, %ymm16, %ymm2
-; AVX512BW-NEXT:    vmovdqa64 32(%rcx), %ymm17
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm1, %ymm17, %ymm4
-; AVX512BW-NEXT:    vpor %ymm2, %ymm4, %ymm2
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512BW-NEXT:    vpshufb %ymm4, %ymm16, %ymm4
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm5, %ymm17, %ymm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-NEXT:    vpshufb %ymm2, %ymm16, %ymm1
+; AVX512BW-NEXT:    vmovdqa64 32(%rcx), %ymm18
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-NEXT:    vpshufb %ymm3, %ymm18, %ymm4
+; AVX512BW-NEXT:    vpor %ymm1, %ymm4, %ymm1
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512BW-NEXT:    vpshufb %ymm0, %ymm16, %ymm4
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512BW-NEXT:    vpshufb %ymm0, %ymm18, %ymm5
 ; AVX512BW-NEXT:    vpor %ymm4, %ymm5, %ymm4
 ; AVX512BW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm4
 ; AVX512BW-NEXT:    vmovdqa64 32(%rsi), %ymm24
 ; AVX512BW-NEXT:    vmovdqa64 32(%rdi), %ymm23
-; AVX512BW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm23[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,0,1,1,4,4,5,5]
+; AVX512BW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm23[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,1,1,4,4,5,5]
 ; AVX512BW-NEXT:    movl $676341840, %r10d # imm = 0x28502850
 ; AVX512BW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm24[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512BW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-NEXT:    vpshufb %ymm7, %ymm23, %ymm5
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-NEXT:    vpshufb %ymm8, %ymm24, %ymm6
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 {%k1} = ymm24[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-NEXT:    vpshufb %ymm8, %ymm23, %ymm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-NEXT:    vpshufb %ymm10, %ymm24, %ymm6
 ; AVX512BW-NEXT:    vpor %ymm5, %ymm6, %ymm5
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm5
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm6
 ; AVX512BW-NEXT:    movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
 ; AVX512BW-NEXT:    kmovq %r10, %k2
-; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm5 {%k2}
+; AVX512BW-NEXT:    vmovdqu8 %zmm4, %zmm6 {%k2}
 ; AVX512BW-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm2, %ymm4, %ymm9
-; AVX512BW-NEXT:    vmovdqa 32(%r8), %ymm10
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm6, %ymm10, %ymm11
-; AVX512BW-NEXT:    vpor %ymm9, %ymm11, %ymm9
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512BW-NEXT:    vpshufb %ymm11, %ymm10, %ymm10
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
-; AVX512BW-NEXT:    vpor %ymm4, %ymm10, %ymm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm5 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-NEXT:    vpshufb %ymm5, %ymm4, %ymm7
+; AVX512BW-NEXT:    vmovdqa 32(%r8), %ymm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-NEXT:    vpshufb %ymm1, %ymm9, %ymm11
+; AVX512BW-NEXT:    vpor %ymm7, %ymm11, %ymm11
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512BW-NEXT:    vpshufb %ymm0, %ymm9, %ymm9
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512BW-NEXT:    vpshufb %ymm0, %ymm4, %ymm4
+; AVX512BW-NEXT:    vpor %ymm4, %ymm9, %ymm4
 ; AVX512BW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm9, %zmm4
-; AVX512BW-NEXT:    vmovdqa 32(%rax), %ymm9
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm10 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512BW-NEXT:    vpermw %ymm9, %ymm10, %ymm10
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm11, %zmm4
+; AVX512BW-NEXT:    vmovdqa 32(%rax), %ymm11
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512BW-NEXT:    vpermw %ymm11, %ymm0, %ymm12
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
 ; AVX512BW-NEXT:    movabsq $145249953336295682, %rax # imm = 0x204081020408102
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm9, %zmm4 {%k3}
+; AVX512BW-NEXT:    vmovdqu8 %zmm11, %zmm4 {%k3}
 ; AVX512BW-NEXT:    movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm4, %zmm5 {%k3}
-; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm14
-; AVX512BW-NEXT:    vpshufb %ymm7, %ymm14, %ymm4
+; AVX512BW-NEXT:    vmovdqu8 %zmm4, %zmm6 {%k3}
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm13
+; AVX512BW-NEXT:    vpshufb %ymm8, %ymm13, %ymm4
 ; AVX512BW-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512BW-NEXT:    vpshufb %ymm8, %ymm15, %ymm7
-; AVX512BW-NEXT:    vpor %ymm4, %ymm7, %ymm4
+; AVX512BW-NEXT:    vpshufb %ymm10, %ymm15, %ymm8
+; AVX512BW-NEXT:    vpor %ymm4, %ymm8, %ymm4
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %xmm25
 ; AVX512BW-NEXT:    vmovdqa 32(%rdi), %xmm8
 ; AVX512BW-NEXT:    vmovdqa64 (%rsi), %xmm26
 ; AVX512BW-NEXT:    vmovdqa 32(%rsi), %xmm10
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpshufb %ymm0, %ymm18, %ymm0
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm12, %zmm4
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpshufb %ymm2, %ymm17, %ymm2
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-NEXT:    vpshufb %ymm1, %ymm19, %ymm1
-; AVX512BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm19[8],xmm18[8],xmm19[9],xmm18[9],xmm19[10],xmm18[10],xmm19[11],xmm18[11],xmm19[12],xmm18[12],xmm19[13],xmm18[13],xmm19[14],xmm18[14],xmm19[15],xmm18[15]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vpshufb %ymm3, %ymm19, %ymm3
+; AVX512BW-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
 ; AVX512BW-NEXT:    movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm4 {%k3}
-; AVX512BW-NEXT:    vmovdqa64 (%r9), %ymm20
-; AVX512BW-NEXT:    vpshufb %ymm2, %ymm20, %ymm0
-; AVX512BW-NEXT:    vmovdqa64 (%r8), %ymm21
-; AVX512BW-NEXT:    vpshufb %ymm6, %ymm21, %ymm1
-; AVX512BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT:    vmovdqa64 (%r9), %xmm30
-; AVX512BW-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm30[8],xmm2[9],xmm30[9],xmm2[10],xmm30[10],xmm2[11],xmm30[11],xmm2[12],xmm30[12],xmm2[13],xmm30[13],xmm2[14],xmm30[14],xmm2[15],xmm30[15]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
-; AVX512BW-NEXT:    vpermw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm4 {%k3}
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpshufb %ymm5, %ymm20, %ymm2
+; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vpshufb %ymm1, %ymm22, %ymm3
+; AVX512BW-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm22[8],xmm20[8],xmm22[9],xmm20[9],xmm22[10],xmm20[10],xmm22[11],xmm20[11],xmm22[12],xmm20[12],xmm22[13],xmm20[13],xmm22[14],xmm20[14],xmm22[15],xmm20[15]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm3 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
+; AVX512BW-NEXT:    vpermw %zmm14, %zmm3, %zmm3
 ; AVX512BW-NEXT:    movabsq $2323999253380730912, %rax # imm = 0x2040810204081020
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k3}
+; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k3}
 ; AVX512BW-NEXT:    movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm4 {%k3}
-; AVX512BW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm14[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
-; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm4 {%k3}
+; AVX512BW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm13[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,3,3,6,6,7,7]
+; AVX512BW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
 ; AVX512BW-NEXT:    movl $338170920, %eax # imm = 0x14281428
 ; AVX512BW-NEXT:    kmovd %eax, %k3
-; AVX512BW-NEXT:    vpshufb %ymm7, %ymm15, %ymm0 {%k3}
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-NEXT:    vpshufb %xmm12, %xmm6, %xmm6
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm6[0,1,0,1]
-; AVX512BW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm18[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,2,3,3,4,6,7,7]
+; AVX512BW-NEXT:    vpshufb %ymm3, %ymm15, %ymm2 {%k3}
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm31 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-NEXT:    vpshufb %xmm31, %xmm5, %xmm5
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[2,3,2,3],zmm5[0,1,0,1]
 ; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512BW-NEXT:    vpshufb %ymm1, %ymm19, %ymm9
-; AVX512BW-NEXT:    vmovdqu8 %ymm6, %ymm9 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 32(%rcx), %xmm29
-; AVX512BW-NEXT:    vmovdqa64 32(%rdx), %xmm31
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm31[0],xmm29[0],xmm31[1],xmm29[1],xmm31[2],xmm29[2],xmm31[3],xmm29[3],xmm31[4],xmm29[4],xmm31[5],xmm29[5],xmm31[6],xmm29[6],xmm31[7],xmm29[7]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm6, %xmm6
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm9[2,3,2,3],zmm6[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k2}
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm20[27],zero,zero,zero,zero,ymm20[30],zero,ymm20[28],zero,zero,zero,zero,ymm20[31],zero
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm21[27],zero,zero,zero,zero,ymm21[30],zero,ymm21[28],zero,zero,zero,zero,ymm21[31],zero,ymm21[29]
-; AVX512BW-NEXT:    vpor %ymm0, %ymm6, %ymm0
-; AVX512BW-NEXT:    vmovdqa64 32(%r9), %xmm27
-; AVX512BW-NEXT:    vmovdqa64 32(%r8), %xmm28
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm28[0],xmm27[0],xmm28[1],xmm27[1],xmm28[2],xmm27[2],xmm28[3],xmm27[3],xmm28[4],xmm27[4],xmm28[5],xmm27[5],xmm28[6],xmm27[6],xmm28[7],xmm27[7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm6[0,1,0,1]
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm6 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-NEXT:    vpermw %zmm3, %zmm6, %zmm6
+; AVX512BW-NEXT:    vpshufb %ymm1, %ymm19, %ymm5
+; AVX512BW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm17[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,2,3,3,4,6,7,7]
+; AVX512BW-NEXT:    vmovdqu8 %ymm7, %ymm5 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 32(%rcx), %xmm27
+; AVX512BW-NEXT:    vmovdqa64 32(%rdx), %xmm29
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm21 = xmm29[0],xmm27[0],xmm29[1],xmm27[1],xmm29[2],xmm27[2],xmm29[3],xmm27[3],xmm29[4],xmm27[4],xmm29[5],xmm27[5],xmm29[6],xmm27[6],xmm29[7],xmm27[7]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm21, %xmm21
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm5[2,3,2,3],zmm21[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm21 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm20[27],zero,zero,zero,zero,ymm20[30],zero,ymm20[28],zero,zero,zero,zero,ymm20[31],zero
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm22[27],zero,zero,zero,zero,ymm22[30],zero,ymm22[28],zero,zero,zero,zero,ymm22[31],zero,ymm22[29]
+; AVX512BW-NEXT:    vpor %ymm2, %ymm5, %ymm2
+; AVX512BW-NEXT:    vmovdqa64 32(%r9), %xmm28
+; AVX512BW-NEXT:    vmovdqa64 32(%r8), %xmm30
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm30[0],xmm28[0],xmm30[1],xmm28[1],xmm30[2],xmm28[2],xmm30[3],xmm28[3],xmm30[4],xmm28[4],xmm30[5],xmm28[5],xmm30[6],xmm28[6],xmm30[7],xmm28[7]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[2,3,2,3],zmm0[0,1,0,1]
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512BW-NEXT:    vpermw %zmm14, %zmm2, %zmm2
 ; AVX512BW-NEXT:    movabsq $580999813345182728, %rax # imm = 0x810204081020408
 ; AVX512BW-NEXT:    kmovq %rax, %k2
-; AVX512BW-NEXT:    vmovdqu8 %zmm6, %zmm0 {%k2}
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k2}
 ; AVX512BW-NEXT:    movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
 ; AVX512BW-NEXT:    kmovq %rax, %k2
-; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k2}
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm21 {%k2}
 ; AVX512BW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm23[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
 ; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
-; AVX512BW-NEXT:    vpshufb %ymm7, %ymm24, %ymm0 {%k3}
-; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-NEXT:    vpshufb %ymm7, %ymm24, %ymm6
+; AVX512BW-NEXT:    vpshufb %ymm3, %ymm24, %ymm0 {%k3}
+; AVX512BW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512BW-NEXT:    vpshufb %ymm3, %ymm24, %ymm2
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
 ; AVX512BW-NEXT:    vpshufb %ymm24, %ymm23, %ymm23
-; AVX512BW-NEXT:    vporq %ymm6, %ymm23, %ymm6
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm6[2,3,2,3],zmm0[2,3,2,3]
+; AVX512BW-NEXT:    vporq %ymm2, %ymm23, %ymm2
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm2[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm16[24,25],zero,ymm16[23],zero,ymm16[21,22,23,26],zero,ymm16[24],zero,ymm16[28,29,26,27]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm17[25],zero,ymm17[23],zero,zero,zero,zero,ymm17[26],zero,ymm17[24],zero,zero,zero,zero
-; AVX512BW-NEXT:    vpor %ymm0, %ymm6, %ymm0
-; AVX512BW-NEXT:    vpshufb %ymm1, %ymm17, %ymm1
-; AVX512BW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm16[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,2,3,3,4,6,7,7]
-; AVX512BW-NEXT:    vmovdqu8 %ymm6, %ymm1 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm18[25],zero,ymm18[23],zero,zero,zero,zero,ymm18[26],zero,ymm18[24],zero,zero,zero,zero
+; AVX512BW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512BW-NEXT:    vpshufb %ymm1, %ymm18, %ymm1
+; AVX512BW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm16[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,2,3,3,4,6,7,7]
+; AVX512BW-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k1}
 ; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512BW-NEXT:    movabsq $1742999440035548184, %rax # imm = 0x183060C183060C18
 ; AVX512BW-NEXT:    kmovq %rax, %k2
 ; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k2}
 ; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 (%r9), %zmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm0[4,5,6,7],zmm1[4,5,6,7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm6[23],zero,zmm6[23,24,25,26],zero,zmm6[24],zero,zmm6[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm6[59],zero,zero,zero,zero,zmm6[62],zero,zmm6[60],zero,zero,zero,zero,zmm6[63],zero
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm1[4,5,6,7],zmm0[4,5,6,7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm16 = zmm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm16[25],zero,zmm16[23],zero,zero,zero,zero,zmm16[26],zero,zmm16[24],zero,zero,zmm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[59],zero,zero,zero,zero,zmm16[62],zero,zmm16[60],zero,zero,zero,zero,zmm16[63],zero,zmm16[61]
-; AVX512BW-NEXT:    vporq %zmm6, %zmm16, %zmm6
-; AVX512BW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm0[4,5,6,7],zmm1[4,5,6,7]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[4,5,6,7],zmm0[4,5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm1 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm2[23],zero,zmm2[23,24,25,26],zero,zmm2[24],zero,zmm2[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
+; AVX512BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
 ; AVX512BW-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
 ; AVX512BW-NEXT:    kmovq %rax, %k2
-; AVX512BW-NEXT:    vmovdqu8 %zmm6, %zmm23 {%k2}
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm6 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
-; AVX512BW-NEXT:    vpermw %zmm3, %zmm6, %zmm6
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k2}
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
+; AVX512BW-NEXT:    vpermw %zmm14, %zmm0, %zmm0
 ; AVX512BW-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm6, %zmm23 {%k3}
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm26, %xmm16
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512BW-NEXT:    vpshufb %xmm6, %xmm25, %xmm13
-; AVX512BW-NEXT:    vporq %xmm16, %xmm13, %xmm13
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm25[0],xmm26[0],xmm25[1],xmm26[1],xmm25[2],xmm26[2],xmm25[3],xmm26[3],xmm25[4],xmm26[4],xmm25[5],xmm26[5],xmm25[6],xmm26[6],xmm25[7],xmm26[7]
-; AVX512BW-NEXT:    vpshufb %xmm12, %xmm16, %xmm12
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm12[0,1,0,1],zmm13[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512BW-NEXT:    vpshufb %xmm25, %xmm19, %xmm12
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512BW-NEXT:    vpshufb %xmm13, %xmm18, %xmm26
-; AVX512BW-NEXT:    vporq %xmm12, %xmm26, %xmm12
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm26 = xmm18[0],xmm19[0],xmm18[1],xmm19[1],xmm18[2],xmm19[2],xmm18[3],xmm19[3],xmm18[4],xmm19[4],xmm18[5],xmm19[5],xmm18[6],xmm19[6],xmm18[7],xmm19[7]
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm26, %xmm11
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm11[0,1,0,1],zmm12[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k3}
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512BW-NEXT:    vpshufb %xmm18, %xmm26, %xmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512BW-NEXT:    vpshufb %xmm2, %xmm25, %xmm1
+; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm25[0],xmm26[0],xmm25[1],xmm26[1],xmm25[2],xmm26[2],xmm25[3],xmm26[3],xmm25[4],xmm26[4],xmm25[5],xmm26[5],xmm25[6],xmm26[6],xmm25[7],xmm26[7]
+; AVX512BW-NEXT:    vpshufb %xmm31, %xmm1, %xmm1
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512BW-NEXT:    vpshufb %xmm0, %xmm19, %xmm1
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512BW-NEXT:    vpshufb %xmm26, %xmm17, %xmm25
+; AVX512BW-NEXT:    vporq %xmm1, %xmm25, %xmm1
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm25, %xmm7
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,0,1],zmm1[0,1,0,1]
 ; AVX512BW-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm11, %zmm16 {%k3}
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm30, %xmm26
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512BW-NEXT:    vpshufb %xmm12, %xmm2, %xmm9
-; AVX512BW-NEXT:    vporq %xmm26, %xmm9, %xmm9
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm30[0],xmm2[1],xmm30[1],xmm2[2],xmm30[2],xmm2[3],xmm30[3],xmm2[4],xmm30[4],xmm2[5],xmm30[5],xmm2[6],xmm30[6],xmm2[7],xmm30[7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,0,1],zmm9[0,1,0,1]
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm9 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512BW-NEXT:    vpermw %zmm3, %zmm9, %zmm9
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm16 {%k3}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512BW-NEXT:    vpshufb %xmm1, %xmm20, %xmm7
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512BW-NEXT:    vpshufb %xmm25, %xmm22, %xmm31
+; AVX512BW-NEXT:    vporq %xmm7, %xmm31, %xmm7
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm31 = xmm22[0],xmm20[0],xmm22[1],xmm20[1],xmm22[2],xmm20[2],xmm22[3],xmm20[3],xmm22[4],xmm20[4],xmm22[5],xmm20[5],xmm22[6],xmm20[6],xmm22[7],xmm20[7]
+; AVX512BW-NEXT:    vpshufb %xmm5, %xmm31, %xmm5
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,0,1],zmm7[0,1,0,1]
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512BW-NEXT:    vpermw %zmm14, %zmm7, %zmm7
 ; AVX512BW-NEXT:    movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm9, %zmm2 {%k3}
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k3}
 ; AVX512BW-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
 ; AVX512BW-NEXT:    kmovq %rax, %k3
-; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm16 {%k3}
-; AVX512BW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,1,1,4,4,5,5]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm2 {%k1} = ymm15[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512BW-NEXT:    vpshufb %ymm7, %ymm15, %ymm7
-; AVX512BW-NEXT:    vpshufb %ymm24, %ymm14, %ymm9
-; AVX512BW-NEXT:    vpor %ymm7, %ymm9, %ymm7
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[2,3,2,3],zmm7[2,3,2,3]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm18[18],zero,zmm18[18,19,20,21],zero,zmm18[19],zero,zmm18[25,26,27,22],zero,zmm18[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm18[56,57],zero,zmm18[55],zero,zmm18[53,54,55,58],zero,zmm18[56],zero,zmm18[60,61,58,59]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm9 = zmm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm19[18],zero,zero,zero,zero,zmm19[21],zero,zmm19[19],zero,zero,zero,zero,zmm19[22],zero,zmm19[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm19[57],zero,zmm19[55],zero,zero,zero,zero,zmm19[58],zero,zmm19[56],zero,zero,zero,zero
-; AVX512BW-NEXT:    vporq %zmm7, %zmm9, %zmm7
-; AVX512BW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm7 {%k2}
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[20],zero,zmm0[18],zero,zmm0[20,21,20,21],zero,zmm0[19],zero,zmm0[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[56,57,56,57],zero,zmm0[55],zero,zmm0[55,56,57,58],zero,zmm0[56],zero,zmm0[62,63]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm20, %zmm1, %zmm1
-; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm1[20],zero,zmm1[18],zero,zero,zero,zero,zmm1[21],zero,zmm1[19],zero,zero,zero,zero,zmm1[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm1[57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero
-; AVX512BW-NEXT:    vporq %zmm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512BW-NEXT:    vpermw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vmovdqu8 %zmm5, %zmm16 {%k3}
+; AVX512BW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[0,0,1,1,4,4,5,5]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm5 {%k1} = ymm15[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
+; AVX512BW-NEXT:    vpshufb %ymm3, %ymm15, %ymm3
+; AVX512BW-NEXT:    vpshufb %ymm24, %ymm13, %ymm7
+; AVX512BW-NEXT:    vpor %ymm3, %ymm7, %ymm3
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm5[2,3,2,3],zmm3[2,3,2,3]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm17[18],zero,zmm17[18,19,20,21],zero,zmm17[19],zero,zmm17[25,26,27,22],zero,zmm17[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm17[56,57],zero,zmm17[55],zero,zmm17[53,54,55,58],zero,zmm17[56],zero,zmm17[60,61,58,59]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm7 = zmm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm19[18],zero,zero,zero,zero,zmm19[21],zero,zmm19[19],zero,zero,zero,zero,zmm19[22],zero,zmm19[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm19[57],zero,zmm19[55],zero,zero,zero,zero,zmm19[58],zero,zmm19[56],zero,zero,zero,zero
+; AVX512BW-NEXT:    vporq %zmm5, %zmm7, %zmm5
+; AVX512BW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm5 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm22[20],zero,zmm22[18],zero,zmm22[20,21,20,21],zero,zmm22[19],zero,zmm22[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm22[56,57,56,57],zero,zmm22[55],zero,zmm22[55,56,57,58],zero,zmm22[56],zero,zmm22[62,63]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm7 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm20[20],zero,zmm20[18],zero,zero,zero,zero,zmm20[21],zero,zmm20[19],zero,zero,zero,zero,zmm20[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm20[57],zero,zmm20[55],zero,zero,zero,zero,zmm20[58],zero,zmm20[56],zero,zero
+; AVX512BW-NEXT:    vporq %zmm3, %zmm7, %zmm3
+; AVX512BW-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm7 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512BW-NEXT:    vpermw %zmm14, %zmm7, %zmm7
 ; AVX512BW-NEXT:    movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
 ; AVX512BW-NEXT:    kmovq %rax, %k1
-; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm3 {%k1}
 ; AVX512BW-NEXT:    movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
 ; AVX512BW-NEXT:    kmovq %rax, %k1
-; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm7 {%k1}
-; AVX512BW-NEXT:    vpshufb %xmm25, %xmm29, %xmm0
-; AVX512BW-NEXT:    vpshufb %xmm13, %xmm31, %xmm1
+; AVX512BW-NEXT:    vmovdqu8 %zmm3, %zmm5 {%k1}
+; AVX512BW-NEXT:    vpshufb %xmm0, %xmm27, %xmm0
+; AVX512BW-NEXT:    vpshufb %xmm26, %xmm29, %xmm3
+; AVX512BW-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm27[8],xmm29[8],xmm27[9],xmm29[9],xmm27[10],xmm29[10],xmm27[11],xmm29[11],xmm27[12],xmm29[12],xmm27[13],xmm29[13],xmm27[14],xmm29[14],xmm27[15],xmm29[15]
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm3[0,1,0,1]
+; AVX512BW-NEXT:    vpshufb %xmm18, %xmm10, %xmm3
+; AVX512BW-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
+; AVX512BW-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; AVX512BW-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,0,1],zmm3[0,1,0,1]
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k2}
+; AVX512BW-NEXT:    vpshufb %xmm1, %xmm28, %xmm0
+; AVX512BW-NEXT:    vpshufb %xmm25, %xmm30, %xmm1
 ; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm29[8],xmm31[8],xmm29[9],xmm31[9],xmm29[10],xmm31[10],xmm29[11],xmm31[11],xmm29[12],xmm31[12],xmm29[13],xmm31[13],xmm29[14],xmm31[14],xmm29[15],xmm31[15]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm30[8],xmm28[8],xmm30[9],xmm28[9],xmm30[10],xmm28[10],xmm30[11],xmm28[11],xmm30[12],xmm28[12],xmm30[13],xmm28[13],xmm30[14],xmm28[14],xmm30[15],xmm28[15]
+; AVX512BW-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm1[0,1,0,1]
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm10, %xmm1
-; AVX512BW-NEXT:    vpshufb %xmm6, %xmm8, %xmm2
-; AVX512BW-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,0,1],zmm2[0,1,0,1]
-; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k2}
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm27, %xmm0
-; AVX512BW-NEXT:    vpshufb %xmm12, %xmm28, %xmm2
-; AVX512BW-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm28[8],xmm27[8],xmm28[9],xmm27[9],xmm28[10],xmm27[10],xmm28[11],xmm27[11],xmm28[12],xmm27[12],xmm28[13],xmm27[13],xmm28[14],xmm27[14],xmm28[15],xmm27[15]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm2[0,1,0,1]
-; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
-; AVX512BW-NEXT:    vpermw %zmm3, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
+; AVX512BW-NEXT:    vpermw %zmm14, %zmm1, %zmm1
 ; AVX512BW-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
 ; AVX512BW-NEXT:    kmovq %rax, %k1
-; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k1}
+; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
 ; AVX512BW-NEXT:    movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
 ; AVX512BW-NEXT:    kmovq %rax, %k1
-; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k1}
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm7, 128(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, 256(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm6, 320(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm16, (%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm23, 384(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm22, 192(%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm21, 192(%rax)
 ; AVX512BW-NEXT:    vmovdqa64 %zmm4, 64(%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BW-FCP-LABEL: store_i8_stride7_vf64:
 ; AVX512BW-FCP:       # %bb.0:
-; AVX512BW-FCP-NEXT:    subq $200, %rsp
 ; AVX512BW-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm7
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm5
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rax), %zmm17
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdx), %ymm3
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm3, %ymm6
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rcx), %ymm10
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm10, %ymm11
-; AVX512BW-FCP-NEXT:    vpor %ymm6, %ymm11, %ymm6
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm9, %ymm3, %ymm3
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm9, %ymm10, %ymm10
-; AVX512BW-FCP-NEXT:    vpor %ymm3, %ymm10, %ymm3
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm11
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm6, %ymm10
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rsi), %ymm12
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm13, %ymm12, %ymm14
-; AVX512BW-FCP-NEXT:    vpor %ymm10, %ymm14, %ymm14
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm9, %ymm6, %ymm6
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm9, %ymm12, %ymm12
-; AVX512BW-FCP-NEXT:    vpor %ymm6, %ymm12, %ymm6
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm10
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rax), %zmm16
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdx), %ymm0
+; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm13, %ymm0, %ymm1
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm14, %ymm2, %ymm3
+; AVX512BW-FCP-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
+; AVX512BW-FCP-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rsi), %ymm5
+; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm9, %ymm5, %ymm6
+; AVX512BW-FCP-NEXT:    vpor %ymm3, %ymm6, %ymm3
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm4, %ymm5, %ymm5
+; AVX512BW-FCP-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm5
 ; AVX512BW-FCP-NEXT:    movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
 ; AVX512BW-FCP-NEXT:    kmovq %r10, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm11, %zmm10 {%k1}
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%r9), %ymm14
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm19, %ymm14, %ymm11
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%r8), %ymm12
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm20, %ymm12, %ymm15
-; AVX512BW-FCP-NEXT:    vpor %ymm11, %ymm15, %ymm15
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm6, %ymm12, %ymm16
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512BW-FCP-NEXT:    vpshufb %ymm6, %ymm14, %ymm14
-; AVX512BW-FCP-NEXT:    vporq %ymm16, %ymm14, %ymm14
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm14, %zmm15, %zmm14
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rax), %ymm15
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm16 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512BW-FCP-NEXT:    vpermw %ymm15, %ymm16, %ymm16
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm16, %zmm15, %zmm15
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm5 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%r9), %ymm3
+; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm0 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm0, %ymm3, %ymm6
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%r8), %ymm7
+; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm7, %ymm8
+; AVX512BW-FCP-NEXT:    vpor %ymm6, %ymm8, %ymm8
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm4, %ymm7, %ymm10
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX512BW-FCP-NEXT:    vpor %ymm3, %ymm10, %ymm3
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm3
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rax), %ymm10
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm4 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512BW-FCP-NEXT:    vpermw %ymm10, %ymm4, %ymm11
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
 ; AVX512BW-FCP-NEXT:    movabsq $145249953336295682, %rax # imm = 0x204081020408102
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm15, %zmm14 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm10, %zmm3 {%k2}
 ; AVX512BW-FCP-NEXT:    movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm14, %zmm10 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqa (%rdx), %ymm14
-; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm14, %ymm1
-; AVX512BW-FCP-NEXT:    vmovdqa (%rcx), %ymm15
-; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm15, %ymm2
-; AVX512BW-FCP-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %xmm21
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %xmm22
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %ymm16
-; AVX512BW-FCP-NEXT:    vpshufb %ymm3, %ymm16, %ymm2
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm5 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX512BW-FCP-NEXT:    vpshufb %ymm2, %ymm10, %ymm2
 ; AVX512BW-FCP-NEXT:    vmovdqa (%rsi), %ymm11
-; AVX512BW-FCP-NEXT:    vpshufb %ymm13, %ymm11, %ymm3
+; AVX512BW-FCP-NEXT:    vpshufb %ymm9, %ymm11, %ymm3
 ; AVX512BW-FCP-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %xmm23
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %xmm24
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm24[8],xmm23[8],xmm24[9],xmm23[9],xmm24[10],xmm23[10],xmm24[11],xmm23[11],xmm24[12],xmm23[12],xmm24[13],xmm23[13],xmm24[14],xmm23[14],xmm24[15],xmm23[15]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm13
+; AVX512BW-FCP-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdi), %xmm8
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %xmm18
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm18[8],xmm3[8],xmm18[9],xmm3[9],xmm18[10],xmm3[10],xmm18[11],xmm3[11],xmm18[12],xmm3[12],xmm18[13],xmm3[13],xmm18[14],xmm3[14],xmm18[15],xmm3[15]
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm17 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm17, %xmm9, %xmm9
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm9, %zmm9
+; AVX512BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm13, %ymm12, %ymm2
+; AVX512BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm14, %ymm13, %ymm14
+; AVX512BW-FCP-NEXT:    vpor %ymm2, %ymm14, %ymm2
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm14 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm19, %xmm14, %xmm14
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm14, %zmm2
 ; AVX512BW-FCP-NEXT:    movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm13 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r9), %ymm18
-; AVX512BW-FCP-NEXT:    vpshufb %ymm19, %ymm18, %ymm1
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %ymm19
-; AVX512BW-FCP-NEXT:    vpshufb %ymm20, %ymm19, %ymm2
-; AVX512BW-FCP-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r9), %xmm25
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %xmm27
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm27[8],xmm25[8],xmm27[9],xmm25[9],xmm27[10],xmm25[10],xmm27[11],xmm25[11],xmm27[12],xmm25[12],xmm27[13],xmm25[13],xmm27[14],xmm25[14],xmm27[15],xmm25[15]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
-; AVX512BW-FCP-NEXT:    vpermw %zmm17, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm2, %zmm9 {%k2}
+; AVX512BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm0, %ymm14, %ymm0
+; AVX512BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FCP-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
+; AVX512BW-FCP-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm20 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm20, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
+; AVX512BW-FCP-NEXT:    vpermw %zmm16, %zmm1, %zmm1
 ; AVX512BW-FCP-NEXT:    movabsq $2323999253380730912, %rax # imm = 0x2040810204081020
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
 ; AVX512BW-FCP-NEXT:    movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm13 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[4,5,6,7],zmm4[4,5,6,7]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm1[23],zero,zmm1[21,22,23,26],zero,zmm1[24],zero,zmm1[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61],zero
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm4[4,5,6,7],zmm5[4,5,6,7]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61],zero,zero
-; AVX512BW-FCP-NEXT:    vporq %zmm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm7[4,5,6,7],zmm8[4,5,6,7]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[27],zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm2[60],zero,zmm2[62,63,62,63],zero,zmm2[61],zero,zmm2[63,60,61]
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm8[4,5,6,7],zmm7[4,5,6,7]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[23],zero,zero,zero,zero,zmm3[26],zero,zmm3[24],zero,zero,zero,zero,zmm3[27],zero,zmm3[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm3[62],zero,zmm3[60],zero,zero,zero,zero,zmm3[63],zero,zmm3[61],zero,zero,zero
-; AVX512BW-FCP-NEXT:    vporq %zmm2, %zmm3, %zmm2
-; AVX512BW-FCP-NEXT:    vmovdqa64 (%r9), %zmm26
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT:    movabsq $1742999440035548184, %rax # imm = 0x183060C183060C18
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm9 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm21 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm21, %xmm18, %xmm0
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm22 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm22, %xmm3, %xmm1
+; AVX512BW-FCP-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm18[0],xmm3[1],xmm18[1],xmm3[2],xmm18[2],xmm3[3],xmm18[3],xmm3[4],xmm18[4],xmm3[5],xmm18[5],xmm3[6],xmm18[6],xmm3[7],xmm18[7]
+; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm26 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm26, %xmm13, %xmm0
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm27 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm27, %xmm12, %xmm1
+; AVX512BW-FCP-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm23 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm23, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512BW-FCP-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm20 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm0[4,5,6,7],zmm26[4,5,6,7]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm26[4,5,6,7],zmm0[4,5,6,7]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61]
-; AVX512BW-FCP-NEXT:    vporq %zmm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm24 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm24, %xmm14, %xmm0
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm25 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm25, %xmm15, %xmm1
+; AVX512BW-FCP-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm29, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512BW-FCP-NEXT:    vpermw %zmm16, %zmm1, %zmm1
+; AVX512BW-FCP-NEXT:    movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm20 {%k2}
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
-; AVX512BW-FCP-NEXT:    vpermw %zmm17, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
-; AVX512BW-FCP-NEXT:    kmovq %rax, %k3
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm20 {%k3}
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm16[28,29,30],zero,ymm16[28],zero,ymm16[30,31,30,31],zero,ymm16[29],zero,ymm16[31,28,29]
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%rsi), %xmm28
+; AVX512BW-FCP-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
+; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k2}
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm28[0],xmm8[1],xmm28[1],xmm8[2],xmm28[2],xmm8[3],xmm28[3],xmm8[4],xmm28[4],xmm8[5],xmm28[5],xmm8[6],xmm28[6],xmm8[7],xmm28[7]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[28,29,30],zero,ymm10[28],zero,ymm10[30,31,30,31],zero,ymm10[29],zero,ymm10[31,28,29]
 ; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero,zero,zero
 ; AVX512BW-FCP-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdi), %xmm7
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rsi), %xmm6
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm31 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm31, %xmm3, %xmm3
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm1[2,3,2,3],zmm3[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm29 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29],zero,zero
-; AVX512BW-FCP-NEXT:    vporq %ymm1, %ymm29, %ymm29
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdx), %xmm2
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%rcx), %xmm1
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm29[2,3,2,3],zmm0[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm29 {%k1}
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm0 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm18[27],zero,zero,zero,zero,ymm18[30],zero,ymm18[28],zero,zero,zero,zero,ymm18[31],zero
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm19[27],zero,zero,zero,zero,ymm19[30],zero,ymm19[28],zero,zero,zero,zero,ymm19[31],zero,ymm19[29]
-; AVX512BW-FCP-NEXT:    vpor %ymm0, %ymm3, %ymm3
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%r9), %xmm5
-; AVX512BW-FCP-NEXT:    vmovdqa 32(%r8), %xmm4
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm30 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm30, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm3[2,3,2,3],zmm0[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-FCP-NEXT:    vpermw %zmm17, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rcx), %xmm4
+; AVX512BW-FCP-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm23, %xmm1, %xmm1
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm23 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm30 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero
+; AVX512BW-FCP-NEXT:    vporq %ymm23, %ymm30, %ymm23
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm23[2,3,2,3],zmm1[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%r9), %xmm30
+; AVX512BW-FCP-NEXT:    vmovdqa64 32(%r8), %xmm31
+; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm31[0],xmm30[0],xmm31[1],xmm30[1],xmm31[2],xmm30[2],xmm31[3],xmm30[3],xmm31[4],xmm30[4],xmm31[5],xmm30[5],xmm31[6],xmm30[6],xmm31[7],xmm30[7]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm29, %xmm0, %xmm0
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm1 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} ymm29 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[27],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29]
+; AVX512BW-FCP-NEXT:    vporq %ymm1, %ymm29, %ymm1
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512BW-FCP-NEXT:    vpermw %zmm16, %zmm1, %zmm1
 ; AVX512BW-FCP-NEXT:    movabsq $580999813345182728, %rax # imm = 0x810204081020408
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
 ; AVX512BW-FCP-NEXT:    movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm0
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm1
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm0[4,5,6,7],zmm1[4,5,6,7]
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[4,5,6,7],zmm0[4,5,6,7]
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm29[23],zero,zmm29[21,22,23,26],zero,zmm29[24],zero,zmm29[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm29[62],zero,zmm29[60],zero,zero,zero,zero,zmm29[63],zero,zmm29[61],zero
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61],zero,zero
+; AVX512BW-FCP-NEXT:    vporq %zmm1, %zmm0, %zmm29
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm2
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm2[4,5,6,7],zmm1[4,5,6,7]
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm6[23],zero,zero,zero,zero,zmm6[26],zero,zmm6[24],zero,zero,zero,zero,zmm6[27],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm6[60],zero,zmm6[62,63,62,63],zero,zmm6[61],zero,zmm6[63,60,61]
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm1[4,5,6,7],zmm2[4,5,6,7]
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm7[23],zero,zero,zero,zero,zmm7[26],zero,zmm7[24],zero,zero,zero,zero,zmm7[27],zero,zmm7[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm7[62],zero,zmm7[60],zero,zero,zero,zero,zmm7[63],zero,zmm7[61],zero,zero,zero
+; AVX512BW-FCP-NEXT:    vporq %zmm6, %zmm7, %zmm6
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm7 = zmm29[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm29 = zmm6[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT:    movabsq $1742999440035548184, %rax # imm = 0x183060C183060C18
+; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm7, %zmm29 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm6
+; AVX512BW-FCP-NEXT:    vmovdqa64 (%r9), %zmm7
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm6[4,5,6,7],zmm7[4,5,6,7]
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm7[4,5,6,7],zmm6[4,5,6,7]
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm0[23],zero,zmm0[23,24,25,26],zero,zmm0[24],zero,zmm0[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm6[25],zero,zmm6[23],zero,zero,zero,zero,zmm6[26],zero,zmm6[24],zero,zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm6[59],zero,zero,zero,zero,zmm6[62],zero,zmm6[60],zero,zero,zero,zero,zmm6[63],zero,zmm6[61]
+; AVX512BW-FCP-NEXT:    vporq %zmm0, %zmm6, %zmm0
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512BW-FCP-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
+; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
 ; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm29 {%k1}
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm9 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm9, %xmm1, %xmm3
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm8 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm8, %xmm2, %xmm28
-; AVX512BW-FCP-NEXT:    vporq %xmm3, %xmm28, %xmm3
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,0,1],zmm1[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm2, %xmm6, %xmm3
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm0 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm0, %xmm7, %xmm28
-; AVX512BW-FCP-NEXT:    vporq %xmm3, %xmm28, %xmm3
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm3[0,1,0,1],zmm6[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm28 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm1, %xmm5, %xmm3
-; AVX512BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm6, %xmm4, %xmm7
-; AVX512BW-FCP-NEXT:    vpor %xmm3, %xmm7, %xmm3
-; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
+; AVX512BW-FCP-NEXT:    vpermw %zmm16, %zmm0, %zmm0
+; AVX512BW-FCP-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
+; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm29 {%k2}
+; AVX512BW-FCP-NEXT:    vpshufb %xmm26, %xmm4, %xmm0
+; AVX512BW-FCP-NEXT:    vpshufb %xmm27, %xmm3, %xmm6
+; AVX512BW-FCP-NEXT:    vpor %xmm0, %xmm6, %xmm0
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm19, %xmm3, %xmm3
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm3[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm21, %xmm28, %xmm3
+; AVX512BW-FCP-NEXT:    vpshufb %xmm22, %xmm8, %xmm4
+; AVX512BW-FCP-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm28[8],xmm8[8],xmm28[9],xmm8[9],xmm28[10],xmm8[10],xmm28[11],xmm8[11],xmm28[12],xmm8[12],xmm28[13],xmm8[13],xmm28[14],xmm8[14],xmm28[15],xmm8[15]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm17, %xmm4, %xmm4
 ; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,0,1],zmm4[0,1,0,1]
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512BW-FCP-NEXT:    vpshufb %xmm24, %xmm30, %xmm0
+; AVX512BW-FCP-NEXT:    vpshufb %xmm25, %xmm31, %xmm4
+; AVX512BW-FCP-NEXT:    vpor %xmm0, %xmm4, %xmm0
+; AVX512BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm31[8],xmm30[8],xmm31[9],xmm30[9],xmm31[10],xmm30[10],xmm31[11],xmm30[11],xmm31[12],xmm30[12],xmm31[13],xmm30[13],xmm31[14],xmm30[14],xmm31[15],xmm30[15]
+; AVX512BW-FCP-NEXT:    vpshufb %xmm20, %xmm4, %xmm4
+; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm4[0,1,0,1]
 ; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm4 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
-; AVX512BW-FCP-NEXT:    vpermw %zmm17, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT:    vpermw %zmm16, %zmm4, %zmm4
 ; AVX512BW-FCP-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
-; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k1}
+; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k2}
 ; AVX512BW-FCP-NEXT:    movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
-; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm28 {%k1}
-; AVX512BW-FCP-NEXT:    vpshufb %xmm9, %xmm22, %xmm3
-; AVX512BW-FCP-NEXT:    vpshufb %xmm8, %xmm21, %xmm4
-; AVX512BW-FCP-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3],xmm21[4],xmm22[4],xmm21[5],xmm22[5],xmm21[6],xmm22[6],xmm21[7],xmm22[7]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm4[0,1,0,1],zmm3[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm2, %xmm24, %xmm2
-; AVX512BW-FCP-NEXT:    vpshufb %xmm0, %xmm23, %xmm0
-; AVX512BW-FCP-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm23[0],xmm24[0],xmm23[1],xmm24[1],xmm23[2],xmm24[2],xmm23[3],xmm24[3],xmm23[4],xmm24[4],xmm23[5],xmm24[5],xmm23[6],xmm24[6],xmm23[7],xmm24[7]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm31, %xmm2, %xmm2
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm0[0,1,0,1]
-; AVX512BW-FCP-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
-; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
-; AVX512BW-FCP-NEXT:    vpshufb %xmm1, %xmm25, %xmm1
-; AVX512BW-FCP-NEXT:    vpshufb %xmm6, %xmm27, %xmm2
-; AVX512BW-FCP-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm27[0],xmm25[0],xmm27[1],xmm25[1],xmm27[2],xmm25[2],xmm27[3],xmm25[3],xmm27[4],xmm25[4],xmm27[5],xmm25[5],xmm27[6],xmm25[6],xmm27[7],xmm25[7]
-; AVX512BW-FCP-NEXT:    vpshufb %xmm30, %xmm2, %xmm2
-; AVX512BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,0,1],zmm1[0,1,0,1]
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512BW-FCP-NEXT:    vpermw %zmm17, %zmm2, %zmm2
-; AVX512BW-FCP-NEXT:    movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
-; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k1}
-; AVX512BW-FCP-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
-; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-FCP-NEXT:    kmovq %rax, %k2
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k2}
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm12[18],zero,zmm12[18,19,20,21],zero,zmm12[19],zero,zmm12[25,26,27,22],zero,zmm12[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm12[56,57],zero,zmm12[55],zero,zmm12[53,54,55,58],zero,zmm12[56],zero,zmm12[60,61,58,59]
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm4 = zmm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm13[18],zero,zero,zero,zero,zmm13[21],zero,zmm13[19],zero,zero,zero,zero,zmm13[22],zero,zmm13[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm13[57],zero,zmm13[55],zero,zero,zero,zero,zmm13[58],zero,zmm13[56],zero,zero,zero,zero
+; AVX512BW-FCP-NEXT:    vporq %zmm0, %zmm4, %zmm0
 ; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm11, %zmm1, %zmm1
 ; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[18,19,20,21],zero,zmm1[19],zero,zmm1[21,20,21,22],zero,zmm1[20],zero,zmm1[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero,zero,zero,zmm1[59],zero
-; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm16, %zmm2, %zmm2
+; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm2, %zmm2
 ; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[21],zero,zmm2[19],zero,zero,zero,zero,zmm2[22],zero,zmm2[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm2[55],zero,zero,zero,zero,zmm2[58],zero,zmm2[56],zero,zero,zero,zero,zmm2[59],zero,zmm2[57]
 ; AVX512BW-FCP-NEXT:    vporq %zmm1, %zmm2, %zmm1
-; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm2[18],zero,zmm2[18,19,20,21],zero,zmm2[19],zero,zmm2[25,26,27,22],zero,zmm2[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm2[56,57],zero,zmm2[55],zero,zmm2[53,54,55,58],zero,zmm2[56],zero,zmm2[60,61,58,59]
-; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm15, %zmm3, %zmm3
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[18],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22],zero,zmm3[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm3[57],zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero,zero,zero
-; AVX512BW-FCP-NEXT:    vporq %zmm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
 ; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k2}
-; AVX512BW-FCP-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm19, %zmm1, %zmm1
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[20],zero,zmm1[18],zero,zmm1[20,21,20,21],zero,zmm1[19],zero,zmm1[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[56,57,56,57],zero,zmm1[55],zero,zmm1[55,56,57,58],zero,zmm1[56],zero,zmm1[62,63]
-; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm18, %zmm26, %zmm3
-; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[20],zero,zmm3[18],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm3[57],zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero
-; AVX512BW-FCP-NEXT:    vporq %zmm1, %zmm3, %zmm1
-; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm3 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512BW-FCP-NEXT:    vpermw %zmm17, %zmm3, %zmm3
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm15[20],zero,zmm15[18],zero,zmm15[20,21,20,21],zero,zmm15[19],zero,zmm15[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm15[56,57,56,57],zero,zmm15[55],zero,zmm15[55,56,57,58],zero,zmm15[56],zero,zmm15[62,63]
+; AVX512BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm14[20],zero,zmm14[18],zero,zero,zero,zero,zmm14[21],zero,zmm14[19],zero,zero,zero,zero,zmm14[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm14[57],zero,zmm14[55],zero,zero,zero,zero,zmm14[58],zero,zmm14[56],zero,zero
+; AVX512BW-FCP-NEXT:    vporq %zmm1, %zmm2, %zmm1
+; AVX512BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512BW-FCP-NEXT:    vpermw %zmm16, %zmm2, %zmm2
 ; AVX512BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
 ; AVX512BW-FCP-NEXT:    movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm1 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k1}
 ; AVX512BW-FCP-NEXT:    movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
 ; AVX512BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
 ; AVX512BW-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, 320(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm28, 256(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm29, 192(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm20, 384(%rax)
-; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512BW-FCP-NEXT:    addq $200, %rsp
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, 128(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, 256(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm29, 384(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm23, 192(%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, (%rax)
+; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, 64(%rax)
 ; AVX512BW-FCP-NEXT:    vzeroupper
 ; AVX512BW-FCP-NEXT:    retq
 ;
 ; AVX512DQ-BW-LABEL: store_i8_stride7_vf64:
 ; AVX512DQ-BW:       # %bb.0:
 ; AVX512DQ-BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%rax), %zmm3
+; AVX512DQ-BW-NEXT:    vmovdqa64 (%rax), %zmm14
 ; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rdx), %ymm16
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm0, %ymm16, %ymm2
-; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rcx), %ymm17
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm1, %ymm17, %ymm4
-; AVX512DQ-BW-NEXT:    vpor %ymm2, %ymm4, %ymm2
-; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm4, %ymm16, %ymm4
-; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm5, %ymm17, %ymm5
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm2, %ymm16, %ymm1
+; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rcx), %ymm18
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm3, %ymm18, %ymm4
+; AVX512DQ-BW-NEXT:    vpor %ymm1, %ymm4, %ymm1
+; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm0, %ymm16, %ymm4
+; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm0, %ymm18, %ymm5
 ; AVX512DQ-BW-NEXT:    vpor %ymm4, %ymm5, %ymm4
 ; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm4
 ; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rsi), %ymm24
 ; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rdi), %ymm23
-; AVX512DQ-BW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm23[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm23[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,1,1,4,4,5,5]
 ; AVX512DQ-BW-NEXT:    movl $676341840, %r10d # imm = 0x28502850
 ; AVX512DQ-BW-NEXT:    kmovd %r10d, %k1
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm24[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm23, %ymm5
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm8, %ymm24, %ymm6
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm1 {%k1} = ymm24[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm8, %ymm23, %ymm5
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm10, %ymm24, %ymm6
 ; AVX512DQ-BW-NEXT:    vpor %ymm5, %ymm6, %ymm5
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm6
 ; AVX512DQ-BW-NEXT:    movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
 ; AVX512DQ-BW-NEXT:    kmovq %r10, %k2
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm2, %zmm5 {%k2}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm4, %zmm6 {%k2}
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm2, %ymm4, %ymm9
-; AVX512DQ-BW-NEXT:    vmovdqa 32(%r8), %ymm10
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm6, %ymm10, %ymm11
-; AVX512DQ-BW-NEXT:    vpor %ymm9, %ymm11, %ymm9
-; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm11, %ymm10, %ymm10
-; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
-; AVX512DQ-BW-NEXT:    vpor %ymm4, %ymm10, %ymm4
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm5 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm5, %ymm4, %ymm7
+; AVX512DQ-BW-NEXT:    vmovdqa 32(%r8), %ymm9
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm1, %ymm9, %ymm11
+; AVX512DQ-BW-NEXT:    vpor %ymm7, %ymm11, %ymm11
+; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm0, %ymm9, %ymm9
+; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm0, %ymm4, %ymm4
+; AVX512DQ-BW-NEXT:    vpor %ymm4, %ymm9, %ymm4
 ; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm9, %zmm4
-; AVX512DQ-BW-NEXT:    vmovdqa 32(%rax), %ymm9
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm10 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512DQ-BW-NEXT:    vpermw %ymm9, %ymm10, %ymm10
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm11, %zmm4
+; AVX512DQ-BW-NEXT:    vmovdqa 32(%rax), %ymm11
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512DQ-BW-NEXT:    vpermw %ymm11, %ymm0, %ymm12
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
 ; AVX512DQ-BW-NEXT:    movabsq $145249953336295682, %rax # imm = 0x204081020408102
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm9, %zmm4 {%k3}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm11, %zmm4 {%k3}
 ; AVX512DQ-BW-NEXT:    movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm4, %zmm5 {%k3}
-; AVX512DQ-BW-NEXT:    vmovdqa (%rdi), %ymm14
-; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm14, %ymm4
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm4, %zmm6 {%k3}
+; AVX512DQ-BW-NEXT:    vmovdqa (%rdi), %ymm13
+; AVX512DQ-BW-NEXT:    vpshufb %ymm8, %ymm13, %ymm4
 ; AVX512DQ-BW-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512DQ-BW-NEXT:    vpshufb %ymm8, %ymm15, %ymm7
-; AVX512DQ-BW-NEXT:    vpor %ymm4, %ymm7, %ymm4
+; AVX512DQ-BW-NEXT:    vpshufb %ymm10, %ymm15, %ymm8
+; AVX512DQ-BW-NEXT:    vpor %ymm4, %ymm8, %ymm4
 ; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdi), %xmm25
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%rdi), %xmm8
 ; AVX512DQ-BW-NEXT:    vmovdqa64 (%rsi), %xmm26
 ; AVX512DQ-BW-NEXT:    vmovdqa 32(%rsi), %xmm10
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
-; AVX512DQ-BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm0, %ymm18, %ymm0
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm12, %zmm4
+; AVX512DQ-BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm2, %ymm17, %ymm2
 ; AVX512DQ-BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm1, %ymm19, %ymm1
-; AVX512DQ-BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm19[8],xmm18[8],xmm19[9],xmm18[9],xmm19[10],xmm18[10],xmm19[11],xmm18[11],xmm19[12],xmm18[12],xmm19[13],xmm18[13],xmm19[14],xmm18[14],xmm19[15],xmm18[15]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-BW-NEXT:    vpshufb %ymm3, %ymm19, %ymm3
+; AVX512DQ-BW-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
 ; AVX512DQ-BW-NEXT:    movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm4 {%k3}
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%r9), %ymm20
-; AVX512DQ-BW-NEXT:    vpshufb %ymm2, %ymm20, %ymm0
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %ymm21
-; AVX512DQ-BW-NEXT:    vpshufb %ymm6, %ymm21, %ymm1
-; AVX512DQ-BW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT:    vmovdqa64 (%r9), %xmm30
-; AVX512DQ-BW-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm30[8],xmm2[9],xmm30[9],xmm2[10],xmm30[10],xmm2[11],xmm30[11],xmm2[12],xmm30[12],xmm2[13],xmm30[13],xmm2[14],xmm30[14],xmm2[15],xmm30[15]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
-; AVX512DQ-BW-NEXT:    vpermw %zmm3, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm2, %zmm4 {%k3}
+; AVX512DQ-BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm5, %ymm20, %ymm2
+; AVX512DQ-BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm1, %ymm22, %ymm3
+; AVX512DQ-BW-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm22[8],xmm20[8],xmm22[9],xmm20[9],xmm22[10],xmm20[10],xmm22[11],xmm20[11],xmm22[12],xmm20[12],xmm22[13],xmm20[13],xmm22[14],xmm20[14],xmm22[15],xmm20[15]
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm9 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm3 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
+; AVX512DQ-BW-NEXT:    vpermw %zmm14, %zmm3, %zmm3
 ; AVX512DQ-BW-NEXT:    movabsq $2323999253380730912, %rax # imm = 0x2040810204081020
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k3}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm3, %zmm2 {%k3}
 ; AVX512DQ-BW-NEXT:    movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm4 {%k3}
-; AVX512DQ-BW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm14[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm2, %zmm4 {%k3}
+; AVX512DQ-BW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm13[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,3,3,6,6,7,7]
+; AVX512DQ-BW-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14,13,12,15,14]
 ; AVX512DQ-BW-NEXT:    movl $338170920, %eax # imm = 0x14281428
 ; AVX512DQ-BW-NEXT:    kmovd %eax, %k3
-; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm15, %ymm0 {%k3}
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm6, %xmm6
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm6[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm18[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,2,3,3,4,6,7,7]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm3, %ymm15, %ymm2 {%k3}
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
+; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} xmm31 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm31, %xmm5, %xmm5
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[2,3,2,3],zmm5[0,1,0,1]
 ; AVX512DQ-BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm1, %ymm19, %ymm9
-; AVX512DQ-BW-NEXT:    vmovdqu8 %ymm6, %ymm9 {%k1}
-; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rcx), %xmm29
-; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rdx), %xmm31
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm31[0],xmm29[0],xmm31[1],xmm29[1],xmm31[2],xmm29[2],xmm31[3],xmm29[3],xmm31[4],xmm29[4],xmm31[5],xmm29[5],xmm31[6],xmm29[6],xmm31[7],xmm29[7]
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm6, %xmm6
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm9[2,3,2,3],zmm6[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k2}
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm20[27],zero,zero,zero,zero,ymm20[30],zero,ymm20[28],zero,zero,zero,zero,ymm20[31],zero
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm21[27],zero,zero,zero,zero,ymm21[30],zero,ymm21[28],zero,zero,zero,zero,ymm21[31],zero,ymm21[29]
-; AVX512DQ-BW-NEXT:    vpor %ymm0, %ymm6, %ymm0
-; AVX512DQ-BW-NEXT:    vmovdqa64 32(%r9), %xmm27
-; AVX512DQ-BW-NEXT:    vmovdqa64 32(%r8), %xmm28
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm28[0],xmm27[0],xmm28[1],xmm27[1],xmm28[2],xmm27[2],xmm28[3],xmm27[3],xmm28[4],xmm27[4],xmm28[5],xmm27[5],xmm28[6],xmm27[6],xmm28[7],xmm27[7]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm6[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm6 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQ-BW-NEXT:    vpermw %zmm3, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT:    vpshufb %ymm1, %ymm19, %ymm5
+; AVX512DQ-BW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm17[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,2,3,3,4,6,7,7]
+; AVX512DQ-BW-NEXT:    vmovdqu8 %ymm7, %ymm5 {%k1}
+; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rcx), %xmm27
+; AVX512DQ-BW-NEXT:    vmovdqa64 32(%rdx), %xmm29
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm21 = xmm29[0],xmm27[0],xmm29[1],xmm27[1],xmm29[2],xmm27[2],xmm29[3],xmm27[3],xmm29[4],xmm27[4],xmm29[5],xmm27[5],xmm29[6],xmm27[6],xmm29[7],xmm27[7]
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm7, %xmm21, %xmm21
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm5[2,3,2,3],zmm21[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm2, %zmm21 {%k2}
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm20[27],zero,zero,zero,zero,ymm20[30],zero,ymm20[28],zero,zero,zero,zero,ymm20[31],zero
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm22[27],zero,zero,zero,zero,ymm22[30],zero,ymm22[28],zero,zero,zero,zero,ymm22[31],zero,ymm22[29]
+; AVX512DQ-BW-NEXT:    vpor %ymm2, %ymm5, %ymm2
+; AVX512DQ-BW-NEXT:    vmovdqa64 32(%r9), %xmm28
+; AVX512DQ-BW-NEXT:    vmovdqa64 32(%r8), %xmm30
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm30[0],xmm28[0],xmm30[1],xmm28[1],xmm30[2],xmm28[2],xmm30[3],xmm28[3],xmm30[4],xmm28[4],xmm30[5],xmm28[5],xmm30[6],xmm28[6],xmm30[7],xmm28[7]
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm5 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[2,3,2,3],zmm0[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512DQ-BW-NEXT:    vpermw %zmm14, %zmm2, %zmm2
 ; AVX512DQ-BW-NEXT:    movabsq $580999813345182728, %rax # imm = 0x810204081020408
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm6, %zmm0 {%k2}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k2}
 ; AVX512DQ-BW-NEXT:    movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k2}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm21 {%k2}
 ; AVX512DQ-BW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm23[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
 ; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm24, %ymm0 {%k3}
-; AVX512DQ-BW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQ-BW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm24, %ymm6
+; AVX512DQ-BW-NEXT:    vpshufb %ymm3, %ymm24, %ymm0 {%k3}
+; AVX512DQ-BW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQ-BW-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm3, %ymm24, %ymm2
 ; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
 ; AVX512DQ-BW-NEXT:    vpshufb %ymm24, %ymm23, %ymm23
-; AVX512DQ-BW-NEXT:    vporq %ymm6, %ymm23, %ymm6
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm6[2,3,2,3],zmm0[2,3,2,3]
+; AVX512DQ-BW-NEXT:    vporq %ymm2, %ymm23, %ymm2
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm2[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm16[24,25],zero,ymm16[23],zero,ymm16[21,22,23,26],zero,ymm16[24],zero,ymm16[28,29,26,27]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm17[25],zero,ymm17[23],zero,zero,zero,zero,ymm17[26],zero,ymm17[24],zero,zero,zero,zero
-; AVX512DQ-BW-NEXT:    vpor %ymm0, %ymm6, %ymm0
-; AVX512DQ-BW-NEXT:    vpshufb %ymm1, %ymm17, %ymm1
-; AVX512DQ-BW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm16[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,2,3,3,4,6,7,7]
-; AVX512DQ-BW-NEXT:    vmovdqu8 %ymm6, %ymm1 {%k1}
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm18[25],zero,ymm18[23],zero,zero,zero,zero,ymm18[26],zero,ymm18[24],zero,zero,zero,zero
+; AVX512DQ-BW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512DQ-BW-NEXT:    vpshufb %ymm1, %ymm18, %ymm1
+; AVX512DQ-BW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm16[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,2,3,3,4,6,7,7]
+; AVX512DQ-BW-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k1}
 ; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512DQ-BW-NEXT:    movabsq $1742999440035548184, %rax # imm = 0x183060C183060C18
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k2
 ; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k2}
 ; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %zmm0
 ; AVX512DQ-BW-NEXT:    vmovdqa64 (%r9), %zmm1
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm0[4,5,6,7],zmm1[4,5,6,7]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm6[23],zero,zmm6[23,24,25,26],zero,zmm6[24],zero,zmm6[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm6[59],zero,zero,zero,zero,zmm6[62],zero,zmm6[60],zero,zero,zero,zero,zmm6[63],zero
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm1[4,5,6,7],zmm0[4,5,6,7]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm16 = zmm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm16[25],zero,zmm16[23],zero,zero,zero,zero,zmm16[26],zero,zmm16[24],zero,zero,zmm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm16[59],zero,zero,zero,zero,zmm16[62],zero,zmm16[60],zero,zero,zero,zero,zmm16[63],zero,zmm16[61]
-; AVX512DQ-BW-NEXT:    vporq %zmm6, %zmm16, %zmm6
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm0[4,5,6,7],zmm1[4,5,6,7]
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[4,5,6,7],zmm0[4,5,6,7]
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm1 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm2[23],zero,zmm2[23,24,25,26],zero,zmm2[24],zero,zmm2[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61]
+; AVX512DQ-BW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
 ; AVX512DQ-BW-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm6, %zmm23 {%k2}
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm6 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
-; AVX512DQ-BW-NEXT:    vpermw %zmm3, %zmm6, %zmm6
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k2}
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
+; AVX512DQ-BW-NEXT:    vpermw %zmm14, %zmm0, %zmm0
 ; AVX512DQ-BW-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm6, %zmm23 {%k3}
-; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm17, %xmm26, %xmm16
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm6, %xmm25, %xmm13
-; AVX512DQ-BW-NEXT:    vporq %xmm16, %xmm13, %xmm13
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm25[0],xmm26[0],xmm25[1],xmm26[1],xmm25[2],xmm26[2],xmm25[3],xmm26[3],xmm25[4],xmm26[4],xmm25[5],xmm26[5],xmm25[6],xmm26[6],xmm25[7],xmm26[7]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm16, %xmm12
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm12[0,1,0,1],zmm13[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm25, %xmm19, %xmm12
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm13 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm13, %xmm18, %xmm26
-; AVX512DQ-BW-NEXT:    vporq %xmm12, %xmm26, %xmm12
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm26 = xmm18[0],xmm19[0],xmm18[1],xmm19[1],xmm18[2],xmm19[2],xmm18[3],xmm19[3],xmm18[4],xmm19[4],xmm18[5],xmm19[5],xmm18[6],xmm19[6],xmm18[7],xmm19[7]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm26, %xmm11
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm11[0,1,0,1],zmm12[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k3}
+; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm18, %xmm26, %xmm0
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm2, %xmm25, %xmm1
+; AVX512DQ-BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm25[0],xmm26[0],xmm25[1],xmm26[1],xmm25[2],xmm26[2],xmm25[3],xmm26[3],xmm25[4],xmm26[4],xmm25[5],xmm26[5],xmm25[6],xmm26[6],xmm25[7],xmm26[7]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm31, %xmm1, %xmm1
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm0, %xmm19, %xmm1
+; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm26, %xmm17, %xmm25
+; AVX512DQ-BW-NEXT:    vporq %xmm1, %xmm25, %xmm1
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm7, %xmm25, %xmm7
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm7[0,1,0,1],zmm1[0,1,0,1]
 ; AVX512DQ-BW-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm11, %zmm16 {%k3}
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm11 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm30, %xmm26
-; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm2, %xmm9
-; AVX512DQ-BW-NEXT:    vporq %xmm26, %xmm9, %xmm9
-; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm30[0],xmm2[1],xmm30[1],xmm2[2],xmm30[2],xmm2[3],xmm30[3],xmm2[4],xmm30[4],xmm2[5],xmm30[5],xmm2[6],xmm30[6],xmm2[7],xmm30[7]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,0,1],zmm9[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm9 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512DQ-BW-NEXT:    vpermw %zmm3, %zmm9, %zmm9
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm1, %zmm16 {%k3}
+; AVX512DQ-BW-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm1, %xmm20, %xmm7
+; AVX512DQ-BW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm25, %xmm22, %xmm31
+; AVX512DQ-BW-NEXT:    vporq %xmm7, %xmm31, %xmm7
+; AVX512DQ-BW-NEXT:    vpunpcklbw {{.*#+}} xmm31 = xmm22[0],xmm20[0],xmm22[1],xmm20[1],xmm22[2],xmm20[2],xmm22[3],xmm20[3],xmm22[4],xmm20[4],xmm22[5],xmm20[5],xmm22[6],xmm20[6],xmm22[7],xmm20[7]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm5, %xmm31, %xmm5
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,0,1],zmm7[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512DQ-BW-NEXT:    vpermw %zmm14, %zmm7, %zmm7
 ; AVX512DQ-BW-NEXT:    movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm9, %zmm2 {%k3}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm7, %zmm5 {%k3}
 ; AVX512DQ-BW-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm2, %zmm16 {%k3}
-; AVX512DQ-BW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,1,1,4,4,5,5]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm2 {%k1} = ymm15[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512DQ-BW-NEXT:    vpshufb %ymm7, %ymm15, %ymm7
-; AVX512DQ-BW-NEXT:    vpshufb %ymm24, %ymm14, %ymm9
-; AVX512DQ-BW-NEXT:    vpor %ymm7, %ymm9, %ymm7
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[2,3,2,3],zmm7[2,3,2,3]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm18[18],zero,zmm18[18,19,20,21],zero,zmm18[19],zero,zmm18[25,26,27,22],zero,zmm18[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm18[56,57],zero,zmm18[55],zero,zmm18[53,54,55,58],zero,zmm18[56],zero,zmm18[60,61,58,59]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm9 = zmm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm19[18],zero,zero,zero,zero,zmm19[21],zero,zmm19[19],zero,zero,zero,zero,zmm19[22],zero,zmm19[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm19[57],zero,zmm19[55],zero,zero,zero,zero,zmm19[58],zero,zmm19[56],zero,zero,zero,zero
-; AVX512DQ-BW-NEXT:    vporq %zmm7, %zmm9, %zmm7
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm2, %zmm7 {%k2}
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm0
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[20],zero,zmm0[18],zero,zmm0[20,21,20,21],zero,zmm0[19],zero,zmm0[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm0[56,57,56,57],zero,zmm0[55],zero,zmm0[55,56,57,58],zero,zmm0[56],zero,zmm0[62,63]
-; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm20, %zmm1, %zmm1
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm1[20],zero,zmm1[18],zero,zero,zero,zero,zmm1[21],zero,zmm1[19],zero,zero,zero,zero,zmm1[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm1[57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero
-; AVX512DQ-BW-NEXT:    vporq %zmm0, %zmm1, %zmm0
-; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512DQ-BW-NEXT:    vpermw %zmm3, %zmm1, %zmm1
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm5, %zmm16 {%k3}
+; AVX512DQ-BW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-BW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[0,0,1,1,4,4,5,5]
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} ymm5 {%k1} = ymm15[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
+; AVX512DQ-BW-NEXT:    vpshufb %ymm3, %ymm15, %ymm3
+; AVX512DQ-BW-NEXT:    vpshufb %ymm24, %ymm13, %ymm7
+; AVX512DQ-BW-NEXT:    vpor %ymm3, %ymm7, %ymm3
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm5[2,3,2,3],zmm3[2,3,2,3]
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm17[18],zero,zmm17[18,19,20,21],zero,zmm17[19],zero,zmm17[25,26,27,22],zero,zmm17[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm17[56,57],zero,zmm17[55],zero,zmm17[53,54,55,58],zero,zmm17[56],zero,zmm17[60,61,58,59]
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm7 = zmm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm19[18],zero,zero,zero,zero,zmm19[21],zero,zmm19[19],zero,zero,zero,zero,zmm19[22],zero,zmm19[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm19[57],zero,zmm19[55],zero,zero,zero,zero,zmm19[58],zero,zmm19[56],zero,zero,zero,zero
+; AVX512DQ-BW-NEXT:    vporq %zmm5, %zmm7, %zmm5
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm3, %zmm5 {%k2}
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm22[20],zero,zmm22[18],zero,zmm22[20,21,20,21],zero,zmm22[19],zero,zmm22[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm22[56,57,56,57],zero,zmm22[55],zero,zmm22[55,56,57,58],zero,zmm22[56],zero,zmm22[62,63]
+; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} zmm7 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm20[20],zero,zmm20[18],zero,zero,zero,zero,zmm20[21],zero,zmm20[19],zero,zero,zero,zero,zmm20[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm20[57],zero,zmm20[55],zero,zero,zero,zero,zmm20[58],zero,zmm20[56],zero,zero
+; AVX512DQ-BW-NEXT:    vporq %zmm3, %zmm7, %zmm3
+; AVX512DQ-BW-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm7 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512DQ-BW-NEXT:    vpermw %zmm14, %zmm7, %zmm7
 ; AVX512DQ-BW-NEXT:    movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm7, %zmm3 {%k1}
 ; AVX512DQ-BW-NEXT:    movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm7 {%k1}
-; AVX512DQ-BW-NEXT:    vpshufb %xmm25, %xmm29, %xmm0
-; AVX512DQ-BW-NEXT:    vpshufb %xmm13, %xmm31, %xmm1
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm3, %zmm5 {%k1}
+; AVX512DQ-BW-NEXT:    vpshufb %xmm0, %xmm27, %xmm0
+; AVX512DQ-BW-NEXT:    vpshufb %xmm26, %xmm29, %xmm3
+; AVX512DQ-BW-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm27[8],xmm29[8],xmm27[9],xmm29[9],xmm27[10],xmm29[10],xmm27[11],xmm29[11],xmm27[12],xmm29[12],xmm27[13],xmm29[13],xmm27[14],xmm29[14],xmm27[15],xmm29[15]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm3[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm18, %xmm10, %xmm3
+; AVX512DQ-BW-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
+; AVX512DQ-BW-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
+; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,0,1],zmm3[0,1,0,1]
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k2}
+; AVX512DQ-BW-NEXT:    vpshufb %xmm1, %xmm28, %xmm0
+; AVX512DQ-BW-NEXT:    vpshufb %xmm25, %xmm30, %xmm1
 ; AVX512DQ-BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm29[8],xmm31[8],xmm29[9],xmm31[9],xmm29[10],xmm31[10],xmm29[11],xmm31[11],xmm29[12],xmm31[12],xmm29[13],xmm31[13],xmm29[14],xmm31[14],xmm29[15],xmm31[15]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm30[8],xmm28[8],xmm30[9],xmm28[9],xmm30[10],xmm28[10],xmm30[11],xmm28[11],xmm30[12],xmm28[12],xmm30[13],xmm28[13],xmm30[14],xmm28[14],xmm30[15],xmm28[15]
+; AVX512DQ-BW-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm1[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vpshufb %xmm17, %xmm10, %xmm1
-; AVX512DQ-BW-NEXT:    vpshufb %xmm6, %xmm8, %xmm2
-; AVX512DQ-BW-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,0,1],zmm2[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k2}
-; AVX512DQ-BW-NEXT:    vpshufb %xmm11, %xmm27, %xmm0
-; AVX512DQ-BW-NEXT:    vpshufb %xmm12, %xmm28, %xmm2
-; AVX512DQ-BW-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; AVX512DQ-BW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm28[8],xmm27[8],xmm28[9],xmm27[9],xmm28[10],xmm27[10],xmm28[11],xmm27[11],xmm28[12],xmm27[12],xmm28[13],xmm27[13],xmm28[14],xmm27[14],xmm28[15],xmm27[15]
-; AVX512DQ-BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-BW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm2[0,1,0,1]
-; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
-; AVX512DQ-BW-NEXT:    vpermw %zmm3, %zmm2, %zmm2
+; AVX512DQ-BW-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
+; AVX512DQ-BW-NEXT:    vpermw %zmm14, %zmm1, %zmm1
 ; AVX512DQ-BW-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k1}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
 ; AVX512DQ-BW-NEXT:    movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
 ; AVX512DQ-BW-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
+; AVX512DQ-BW-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k1}
 ; AVX512DQ-BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, 128(%rax)
-; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, 256(%rax)
+; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
+; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, 320(%rax)
 ; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm16, (%rax)
 ; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm23, 384(%rax)
-; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, 192(%rax)
+; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm21, 192(%rax)
 ; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm4, 64(%rax)
 ; AVX512DQ-BW-NEXT:    vzeroupper
 ; AVX512DQ-BW-NEXT:    retq
 ;
 ; AVX512DQ-BW-FCP-LABEL: store_i8_stride7_vf64:
 ; AVX512DQ-BW-FCP:       # %bb.0:
-; AVX512DQ-BW-FCP-NEXT:    subq $200, %rsp
 ; AVX512DQ-BW-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm7
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm5
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rax), %zmm17
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdx), %ymm3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm3, %ymm6
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rcx), %ymm10
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm10, %ymm11
-; AVX512DQ-BW-FCP-NEXT:    vpor %ymm6, %ymm11, %ymm6
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm9, %ymm3, %ymm3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm9, %ymm10, %ymm10
-; AVX512DQ-BW-FCP-NEXT:    vpor %ymm3, %ymm10, %ymm3
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm11
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm6, %ymm10
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rsi), %ymm12
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm13, %ymm12, %ymm14
-; AVX512DQ-BW-FCP-NEXT:    vpor %ymm10, %ymm14, %ymm14
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm9, %ymm6, %ymm6
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm9, %ymm12, %ymm12
-; AVX512DQ-BW-FCP-NEXT:    vpor %ymm6, %ymm12, %ymm6
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm10
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rax), %zmm16
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdx), %ymm0
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm13, %ymm0, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm14, %ymm2, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-BW-FCP-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rsi), %ymm5
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm9, %ymm5, %ymm6
+; AVX512DQ-BW-FCP-NEXT:    vpor %ymm3, %ymm6, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm4, %ymm5, %ymm5
+; AVX512DQ-BW-FCP-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm5
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %r10, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm11, %zmm10 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%r9), %ymm14
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm19, %ymm14, %ymm11
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%r8), %ymm12
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm20, %ymm12, %ymm15
-; AVX512DQ-BW-FCP-NEXT:    vpor %ymm11, %ymm15, %ymm15
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm6, %ymm12, %ymm16
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm6, %ymm14, %ymm14
-; AVX512DQ-BW-FCP-NEXT:    vporq %ymm16, %ymm14, %ymm14
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm14, %zmm15, %zmm14
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rax), %ymm15
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} ymm16 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm15, %ymm16, %ymm16
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm16, %zmm15, %zmm15
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm5 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%r9), %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm0 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm0, %ymm3, %ymm6
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%r8), %ymm7
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm7, %ymm8
+; AVX512DQ-BW-FCP-NEXT:    vpor %ymm6, %ymm8, %ymm8
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm4, %ymm7, %ymm10
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vpor %ymm3, %ymm10, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm3
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rax), %ymm10
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm4 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %ymm10, %ymm4, %ymm11
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $145249953336295682, %rax # imm = 0x204081020408102
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm15, %zmm14 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm10, %zmm3 {%k2}
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm14, %zmm10 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rdx), %ymm14
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm14, %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rcx), %ymm15
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm15, %ymm2
-; AVX512DQ-BW-FCP-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %xmm21
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %xmm22
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %ymm16
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm3, %ymm16, %ymm2
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm5 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm2, %ymm10, %ymm2
 ; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rsi), %ymm11
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm13, %ymm11, %ymm3
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm9, %ymm11, %ymm3
 ; AVX512DQ-BW-FCP-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %xmm23
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %xmm24
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm24[8],xmm23[8],xmm24[9],xmm23[9],xmm24[10],xmm23[10],xmm24[11],xmm23[11],xmm24[12],xmm23[12],xmm24[13],xmm23[13],xmm24[14],xmm23[14],xmm24[15],xmm23[15]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm13
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdi), %xmm8
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %xmm18
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm18[8],xmm3[8],xmm18[9],xmm3[9],xmm18[10],xmm3[10],xmm18[11],xmm3[11],xmm18[12],xmm3[12],xmm18[13],xmm3[13],xmm18[14],xmm3[14],xmm18[15],xmm3[15]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm17 = [2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm17, %xmm9, %xmm9
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm9, %zmm9
+; AVX512DQ-BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm13, %ymm12, %ymm2
+; AVX512DQ-BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm14, %ymm13, %ymm14
+; AVX512DQ-BW-FCP-NEXT:    vpor %ymm2, %ymm14, %ymm2
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm14 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm19, %xmm14, %xmm14
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm14, %zmm2
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm13 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r9), %ymm18
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm19, %ymm18, %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %ymm19
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm20, %ymm19, %ymm2
-; AVX512DQ-BW-FCP-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r9), %xmm25
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %xmm27
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm27[8],xmm25[8],xmm27[9],xmm25[9],xmm27[10],xmm25[10],xmm27[11],xmm25[11],xmm27[12],xmm25[12],xmm27[13],xmm25[13],xmm27[14],xmm25[14],xmm27[15],xmm25[15]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm17, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm2, %zmm9 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm0, %ymm14, %ymm0
+; AVX512DQ-BW-FCP-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm20 = [u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm20, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6,6,1,6,0,7,1,0,7,14,9,14,8,15,9,8,15]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm16, %zmm1, %zmm1
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $2323999253380730912, %rax # imm = 0x2040810204081020
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm13 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[4,5,6,7],zmm4[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm1[23],zero,zmm1[21,22,23,26],zero,zmm1[24],zero,zmm1[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61],zero
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm4[4,5,6,7],zmm5[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61],zero,zero
-; AVX512DQ-BW-FCP-NEXT:    vporq %zmm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm7[4,5,6,7],zmm8[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zero,zero,zmm2[27],zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm2[60],zero,zmm2[62,63,62,63],zero,zmm2[61],zero,zmm2[63,60,61]
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm8[4,5,6,7],zmm7[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[23],zero,zero,zero,zero,zmm3[26],zero,zmm3[24],zero,zero,zero,zero,zmm3[27],zero,zmm3[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm3[62],zero,zmm3[60],zero,zero,zero,zero,zmm3[63],zero,zmm3[61],zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT:    vporq %zmm2, %zmm3, %zmm2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r9), %zmm26
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm20 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT:    movabsq $1742999440035548184, %rax # imm = 0x183060C183060C18
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm9 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm21 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm21, %xmm18, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm22 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm22, %xmm3, %xmm1
+; AVX512DQ-BW-FCP-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm18[0],xmm3[1],xmm18[1],xmm3[2],xmm18[2],xmm3[3],xmm18[3],xmm3[4],xmm18[4],xmm3[5],xmm18[5],xmm3[6],xmm18[6],xmm3[7],xmm18[7]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm26 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm26, %xmm13, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm27 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm27, %xmm12, %xmm1
+; AVX512DQ-BW-FCP-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm23 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm23, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm20 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm0[4,5,6,7],zmm26[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm1[23],zero,zmm1[23,24,25,26],zero,zmm1[24],zero,zmm1[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm26[4,5,6,7],zmm0[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[25],zero,zmm2[23],zero,zero,zero,zero,zmm2[26],zero,zmm2[24],zero,zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero,zmm2[61]
-; AVX512DQ-BW-FCP-NEXT:    vporq %zmm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm24 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm24, %xmm14, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm25 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm25, %xmm15, %xmm1
+; AVX512DQ-BW-FCP-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm29 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm29, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm16, %zmm1, %zmm1
+; AVX512DQ-BW-FCP-NEXT:    movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm20 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm17, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
-; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm20 {%k3}
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm16[28,29,30],zero,ymm16[28],zero,ymm16[30,31,30,31],zero,ymm16[29],zero,ymm16[31,28,29]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%rsi), %xmm28
+; AVX512DQ-BW-FCP-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
+; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm28[0],xmm8[1],xmm28[1],xmm8[2],xmm28[2],xmm8[3],xmm28[3],xmm8[4],xmm28[4],xmm8[5],xmm28[5],xmm8[6],xmm28[6],xmm8[7],xmm28[7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[28,29,30],zero,ymm10[28],zero,ymm10[30,31,30,31],zero,ymm10[29],zero,ymm10[31,28,29]
 ; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero,zero,zero
 ; AVX512DQ-BW-FCP-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdi), %xmm7
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rsi), %xmm6
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm31 = [0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm31, %xmm3, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm1[2,3,2,3],zmm3[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm29 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29],zero,zero
-; AVX512DQ-BW-FCP-NEXT:    vporq %ymm1, %ymm29, %ymm29
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdx), %xmm2
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rcx), %xmm1
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm29[2,3,2,3],zmm0[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm29 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm0 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm18[27],zero,zero,zero,zero,ymm18[30],zero,ymm18[28],zero,zero,zero,zero,ymm18[31],zero
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm19[27],zero,zero,zero,zero,ymm19[30],zero,ymm19[28],zero,zero,zero,zero,ymm19[31],zero,ymm19[29]
-; AVX512DQ-BW-FCP-NEXT:    vpor %ymm0, %ymm3, %ymm3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%r9), %xmm5
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%r8), %xmm4
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm30 = [u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm30, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm3[2,3,2,3],zmm0[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm17, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rcx), %xmm4
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm23, %xmm1, %xmm1
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm23 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm30 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero
+; AVX512DQ-BW-FCP-NEXT:    vporq %ymm23, %ymm30, %ymm23
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm23[2,3,2,3],zmm1[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%r9), %xmm30
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 32(%r8), %xmm31
+; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm31[0],xmm30[0],xmm31[1],xmm30[1],xmm31[2],xmm30[2],xmm31[3],xmm30[3],xmm31[4],xmm30[4],xmm31[5],xmm30[5],xmm31[6],xmm30[6],xmm31[7],xmm30[7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm29, %xmm0, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm1 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} ymm29 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[27],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29]
+; AVX512DQ-BW-FCP-NEXT:    vporq %ymm1, %ymm29, %ymm1
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm1 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm16, %zmm1, %zmm1
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $580999813345182728, %rax # imm = 0x810204081020408
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm23 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm0
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm1
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm0[4,5,6,7],zmm1[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[4,5,6,7],zmm0[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zmm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm29[23],zero,zmm29[21,22,23,26],zero,zmm29[24],zero,zmm29[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm29[62],zero,zmm29[60],zero,zero,zero,zero,zmm29[63],zero,zmm29[61],zero
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm0[25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zero,zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61],zero,zero
+; AVX512DQ-BW-FCP-NEXT:    vporq %zmm1, %zmm0, %zmm29
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm2
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm2[4,5,6,7],zmm1[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm6[23],zero,zero,zero,zero,zmm6[26],zero,zmm6[24],zero,zero,zero,zero,zmm6[27],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm6[60],zero,zmm6[62,63,62,63],zero,zmm6[61],zero,zmm6[63,60,61]
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm1[4,5,6,7],zmm2[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm7[23],zero,zero,zero,zero,zmm7[26],zero,zmm7[24],zero,zero,zero,zero,zmm7[27],zero,zmm7[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm7[62],zero,zmm7[60],zero,zero,zero,zero,zmm7[63],zero,zmm7[61],zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT:    vporq %zmm6, %zmm7, %zmm6
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm7 = zmm29[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm29 = zmm6[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT:    movabsq $1742999440035548184, %rax # imm = 0x183060C183060C18
+; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm7, %zmm29 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm6
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r9), %zmm7
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm6[4,5,6,7],zmm7[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm7[4,5,6,7],zmm6[4,5,6,7]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm0[23],zero,zmm0[23,24,25,26],zero,zmm0[24],zero,zmm0[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm6 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm6[25],zero,zmm6[23],zero,zero,zero,zero,zmm6[26],zero,zmm6[24],zero,zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm6[59],zero,zero,zero,zero,zmm6[62],zero,zmm6[60],zero,zero,zero,zero,zmm6[63],zero,zmm6[61]
+; AVX512DQ-BW-FCP-NEXT:    vporq %zmm0, %zmm6, %zmm0
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512DQ-BW-FCP-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
+; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
 ; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm29 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm9 = [u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm9, %xmm1, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm8 = [u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm8, %xmm2, %xmm28
-; AVX512DQ-BW-FCP-NEXT:    vporq %xmm3, %xmm28, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,0,1],zmm1[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm2, %xmm6, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm0 = [u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm0, %xmm7, %xmm28
-; AVX512DQ-BW-FCP-NEXT:    vporq %xmm3, %xmm28, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm3[0,1,0,1],zmm6[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm28 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm1 = [128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm1, %xmm5, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm6, %xmm4, %xmm7
-; AVX512DQ-BW-FCP-NEXT:    vpor %xmm3, %xmm7, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm0 = [27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28,30,29,30,31,31,30,30,31,30,29,30,31,31,30,30,31]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm16, %zmm0, %zmm0
+; AVX512DQ-BW-FCP-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
+; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm29 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm26, %xmm4, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm27, %xmm3, %xmm6
+; AVX512DQ-BW-FCP-NEXT:    vpor %xmm0, %xmm6, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm19, %xmm3, %xmm3
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm3[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm21, %xmm28, %xmm3
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm22, %xmm8, %xmm4
+; AVX512DQ-BW-FCP-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm28[8],xmm8[8],xmm28[9],xmm8[9],xmm28[10],xmm8[10],xmm28[11],xmm8[11],xmm28[12],xmm8[12],xmm28[13],xmm8[13],xmm28[14],xmm8[14],xmm28[15],xmm8[15]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm17, %xmm4, %xmm4
 ; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,0,1],zmm4[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm24, %xmm30, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm25, %xmm31, %xmm4
+; AVX512DQ-BW-FCP-NEXT:    vpor %xmm0, %xmm4, %xmm0
+; AVX512DQ-BW-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm31[8],xmm30[8],xmm31[9],xmm30[9],xmm31[10],xmm30[10],xmm31[11],xmm30[11],xmm31[12],xmm30[12],xmm31[13],xmm30[13],xmm31[14],xmm30[14],xmm31[15],xmm30[15]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm20, %xmm4, %xmm4
+; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm4[0,1,0,1]
 ; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm4 = [18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20,20,21,20,21,21,22,21,22,20,21,20,21,21,22,21,22]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm17, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm16, %zmm4, %zmm4
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
-; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm4, %zmm0 {%k2}
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
-; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm28 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm9, %xmm22, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm8, %xmm21, %xmm4
-; AVX512DQ-BW-FCP-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3],xmm21[4],xmm22[4],xmm21[5],xmm22[5],xmm21[6],xmm22[6],xmm21[7],xmm22[7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm4[0,1,0,1],zmm3[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm2, %xmm24, %xmm2
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm0, %xmm23, %xmm0
-; AVX512DQ-BW-FCP-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm23[0],xmm24[0],xmm23[1],xmm24[1],xmm23[2],xmm24[2],xmm23[3],xmm24[3],xmm23[4],xmm24[4],xmm23[5],xmm24[5],xmm23[6],xmm24[6],xmm23[7],xmm24[7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm31, %xmm2, %xmm2
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm0[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
-; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm1, %xmm25, %xmm1
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm6, %xmm27, %xmm2
-; AVX512DQ-BW-FCP-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512DQ-BW-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm27[0],xmm25[0],xmm27[1],xmm25[1],xmm27[2],xmm25[2],xmm27[3],xmm25[3],xmm27[4],xmm25[4],xmm27[5],xmm25[5],xmm27[6],xmm25[6],xmm27[7],xmm25[7]
-; AVX512DQ-BW-FCP-NEXT:    vpshufb %xmm30, %xmm2, %xmm2
-; AVX512DQ-BW-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,0,1],zmm1[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm17, %zmm2, %zmm2
-; AVX512DQ-BW-FCP-NEXT:    movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
-; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
-; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k2
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k2}
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm12[18],zero,zmm12[18,19,20,21],zero,zmm12[19],zero,zmm12[25,26,27,22],zero,zmm12[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm12[56,57],zero,zmm12[55],zero,zmm12[53,54,55,58],zero,zmm12[56],zero,zmm12[60,61,58,59]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm4 = zmm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm13[18],zero,zero,zero,zero,zmm13[21],zero,zmm13[19],zero,zero,zero,zero,zmm13[22],zero,zmm13[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm13[57],zero,zmm13[55],zero,zero,zero,zero,zmm13[58],zero,zmm13[56],zero,zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT:    vporq %zmm0, %zmm4, %zmm0
 ; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm11, %zmm1, %zmm1
 ; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[18,19,20,21],zero,zmm1[19],zero,zmm1[21,20,21,22],zero,zmm1[20],zero,zmm1[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero,zero,zero,zmm1[59],zero
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm16, %zmm2, %zmm2
+; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm2, %zmm2
 ; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm2[21],zero,zmm2[19],zero,zero,zero,zero,zmm2[22],zero,zmm2[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm2[55],zero,zero,zero,zero,zmm2[58],zero,zmm2[56],zero,zero,zero,zero,zmm2[59],zero,zmm2[57]
 ; AVX512DQ-BW-FCP-NEXT:    vporq %zmm1, %zmm2, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm2[18],zero,zmm2[18,19,20,21],zero,zmm2[19],zero,zmm2[25,26,27,22],zero,zmm2[20],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm2[56,57],zero,zmm2[55],zero,zmm2[53,54,55,58],zero,zmm2[56],zero,zmm2[60,61,58,59]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm15, %zmm3, %zmm3
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[18],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22],zero,zmm3[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm3[57],zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT:    vporq %zmm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
 ; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k2}
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm19, %zmm1, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[20],zero,zmm1[18],zero,zmm1[20,21,20,21],zero,zmm1[19],zero,zmm1[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm1[56,57,56,57],zero,zmm1[55],zero,zmm1[55,56,57,58],zero,zmm1[56],zero,zmm1[62,63]
-; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm18, %zmm26, %zmm3
-; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[20],zero,zmm3[18],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm3[57],zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero
-; AVX512DQ-BW-FCP-NEXT:    vporq %zmm1, %zmm3, %zmm1
-; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm3 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm17, %zmm3, %zmm3
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm15[20],zero,zmm15[18],zero,zmm15[20,21,20,21],zero,zmm15[19],zero,zmm15[19,20,21,22],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zmm15[56,57,56,57],zero,zmm15[55],zero,zmm15[55,56,57,58],zero,zmm15[56],zero,zmm15[62,63]
+; AVX512DQ-BW-FCP-NEXT:    vpshufb {{.*#+}} zmm2 = zmm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm14[20],zero,zmm14[18],zero,zero,zero,zero,zmm14[21],zero,zmm14[19],zero,zero,zero,zero,zmm14[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm14[57],zero,zmm14[55],zero,zero,zero,zero,zmm14[58],zero,zmm14[56],zero,zero
+; AVX512DQ-BW-FCP-NEXT:    vporq %zmm1, %zmm2, %zmm1
+; AVX512DQ-BW-FCP-NEXT:    vpmovsxbw {{.*#+}} zmm2 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512DQ-BW-FCP-NEXT:    vpermw %zmm16, %zmm2, %zmm2
 ; AVX512DQ-BW-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $1161999626690365456, %rax # imm = 0x1020408102040810
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm3, %zmm1 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k1}
 ; AVX512DQ-BW-FCP-NEXT:    movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
 ; AVX512DQ-BW-FCP-NEXT:    kmovq %rax, %k1
-; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
 ; AVX512DQ-BW-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, 320(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm28, 256(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm29, 192(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm20, 384(%rax)
-; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512DQ-BW-FCP-NEXT:    addq $200, %rsp
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, 128(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, 256(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm29, 384(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm23, 192(%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, (%rax)
+; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, 64(%rax)
 ; AVX512DQ-BW-FCP-NEXT:    vzeroupper
 ; AVX512DQ-BW-FCP-NEXT:    retq
   %in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index 44b5162812c0e..47a6022e428c3 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -1669,12 +1669,12 @@ define void @splat2_v4f64_load_store(ptr %s, ptr %d) nounwind {
 define void @splat2_v4i64_load_store(ptr %s, ptr %d) nounwind {
 ; AVX1-LABEL: splat2_v4i64_load_store:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vperm2f128 $51, (%rdi), %ymm0, %ymm0 # ymm0 = mem[2,3,2,3]
+; AVX1-NEXT:    vbroadcastf128 (%rdi), %ymm0 # ymm0 = mem[0,1,0,1]
 ; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0,0,3,3]
-; AVX1-NEXT:    vbroadcastf128 (%rdi), %ymm1 # ymm1 = mem[0,1,0,1]
+; AVX1-NEXT:    vbroadcastf128 16(%rdi), %ymm1 # ymm1 = mem[0,1,0,1]
 ; AVX1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0,0,3,3]
-; AVX1-NEXT:    vmovupd %ymm0, 32(%rsi)
-; AVX1-NEXT:    vmovupd %ymm1, (%rsi)
+; AVX1-NEXT:    vmovupd %ymm1, 32(%rsi)
+; AVX1-NEXT:    vmovupd %ymm0, (%rsi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index bd6cbbaadc955..45d589b6c988e 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1884,17 +1884,16 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ;
 ; AVX2-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,u,0,u,0,u,0,u,0,u,0,u,0,u,0,u,16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,u,0,u,0,u,0,u,0,u,0,u,0,u,0,u,16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero,ymm0[16],zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vpbroadcastw {{.*#+}} ymm3 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
-; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
+; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -2133,17 +2132,16 @@ define void @vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12(ptr %in
 ;
 ; AVX2-LABEL: vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,u,u,u,0,u,u,u,0,u,u,u,0,u,u,u,16],zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16],zero,zero,zero
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,u,u,u,0,u,u,u,0,u,u,u,0,u,u,u,16],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16],zero,zero,zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
+; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -2384,17 +2382,16 @@ define void @vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6(ptr %in.e
 ;
 ; AVX2-LABEL: vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,u,u,u,u,u,u,u,0,u,u,u,u,u,u,u,16],zero,zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,u,u,u,u,u,u,u,0,u,u,u,u,u,u,u,16],zero,zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
+; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -2636,15 +2633,15 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; AVX2-NEXT:    vpmovzxbq {{.*#+}} ymm1 = [255,0,255,0]
-; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
-; AVX2-NEXT:    vpmovsxwq {{.*#+}} ymm3 = [18446744073709551360,18446744073709551615,18446744073709551360,18446744073709551615]
-; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpand (%rdi), %xmm1, %xmm1
-; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
-; AVX2-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX2-NEXT:    vmovdqa %ymm1, 32(%rdx)
-; AVX2-NEXT:    vmovdqa %ymm0, (%rdx)
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm3 = mem[2,3],ymm2[2,3]
+; AVX2-NEXT:    vpmovsxwq {{.*#+}} ymm4 = [18446744073709551360,18446744073709551615,18446744073709551360,18446744073709551615]
+; AVX2-NEXT:    vpblendvb %ymm4, %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddb (%rsi), %ymm2, %ymm1
+; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -3484,16 +3481,16 @@ define void @vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3(ptr %i
 ;
 ; AVX2-LABEL: vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = mem[0],xmm0[1,2,3,4,5,6,7]
-; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15]
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
-; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)
-; AVX2-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm0
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX2-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vmovdqa %ymm0, (%rdx)
+; AVX2-NEXT:    vmovdqa %ymm1, 32(%rdx)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;


